From dc17ff8f11d129db9e83ab7244769e4eae05e14d Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 8 Jan 2008 15:46:30 -0500 Subject: Btrfs: Add data=ordered support This forces file data extents down the disk along with the metadata that references them. The current implementation is fairly simple, and just writes out all of the dirty pages in an inode before the commit. Signed-off-by: Chris Mason --- fs/btrfs/ordered-data.c | 221 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 fs/btrfs/ordered-data.c (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c new file mode 100644 index 00000000000..411aba84d30 --- /dev/null +++ b/fs/btrfs/ordered-data.c @@ -0,0 +1,221 @@ +/* + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/gfp.h> +#include <linux/slab.h> +#include "ctree.h" +#include "transaction.h" +#include "btrfs_inode.h" + +struct tree_entry { + u64 root_objectid; + u64 objectid; + struct rb_node rb_node; +}; + +/* + * returns > 0 if entry passed (root, objectid) is > entry, + * < 0 if (root, objectid) < entry and zero if they are equal + */ +static int comp_entry(struct tree_entry *entry, u64 root_objectid, + u64 objectid) +{ + if (root_objectid < entry->root_objectid) + return -1; + if (root_objectid > entry->root_objectid) + return 1; + if (objectid < entry->objectid) + return -1; + if (objectid > entry->objectid) + return 1; + return 0; +} + +static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid, + u64 objectid, struct rb_node *node) +{ + struct rb_node ** p = &root->rb_node; + struct rb_node * parent = NULL; + struct tree_entry *entry; + int comp; + + while(*p) { + parent = *p; + entry = rb_entry(parent, struct tree_entry, rb_node); + + comp = comp_entry(entry, root_objectid, objectid); + if (comp < 0) + p = &(*p)->rb_left; + else if (comp > 0) + p = &(*p)->rb_right; + else + return parent; + } + + rb_link_node(node, parent, p); + rb_insert_color(node, root); + return NULL; +} + +static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid, + u64 objectid, struct rb_node **prev_ret) +{ + struct rb_node * n = root->rb_node; + struct rb_node *prev = NULL; + struct tree_entry *entry; + struct tree_entry *prev_entry = NULL; + int comp; + + while(n) { + entry = rb_entry(n, struct tree_entry, rb_node); + prev = n; + prev_entry = entry; + comp = comp_entry(entry, root_objectid, objectid); + + if (comp < 0) + n = n->rb_left; + else if (comp > 0) + n = n->rb_right; + else + return n; + } + if (!prev_ret) + return NULL; + + while(prev && comp_entry(prev_entry, root_objectid, objectid) >= 0) { + prev = rb_next(prev); + prev_entry = rb_entry(prev, struct tree_entry, rb_node); + } + *prev_ret = prev; + return NULL; +} + +static inline struct rb_node *tree_search(struct rb_root *root, + u64 root_objectid, u64 
objectid) +{ + struct rb_node *prev; + struct rb_node *ret; + ret = __tree_search(root, root_objectid, objectid, &prev); + if (!ret) + return prev; + return ret; +} + +int btrfs_add_ordered_inode(struct inode *inode) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + u64 root_objectid = root->root_key.objectid; + u64 transid = root->fs_info->running_transaction->transid; + struct tree_entry *entry; + struct rb_node *node; + struct btrfs_ordered_inode_tree *tree; + + if (transid <= BTRFS_I(inode)->ordered_trans) + return 0; + + tree = &root->fs_info->running_transaction->ordered_inode_tree; + + read_lock(&tree->lock); + node = __tree_search(&tree->tree, root_objectid, inode->i_ino, NULL); + read_unlock(&tree->lock); + if (node) { + return 0; + } + + entry = kmalloc(sizeof(*entry), GFP_NOFS); + if (!entry) + return -ENOMEM; + + write_lock(&tree->lock); + entry->objectid = inode->i_ino; + entry->root_objectid = root_objectid; + + node = tree_insert(&tree->tree, root_objectid, + inode->i_ino, &entry->rb_node); + + BTRFS_I(inode)->ordered_trans = transid; + + write_unlock(&tree->lock); + if (node) + kfree(entry); + return 0; +} + +int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, + u64 *root_objectid, u64 *objectid) +{ + struct tree_entry *entry; + struct rb_node *node; + + write_lock(&tree->lock); + node = tree_search(&tree->tree, *root_objectid, *objectid); + if (!node) { + write_unlock(&tree->lock); + return 0; + } + entry = rb_entry(node, struct tree_entry, rb_node); + + while(comp_entry(entry, *root_objectid, *objectid) >= 0) { + node = rb_next(node); + if (!node) + break; + entry = rb_entry(node, struct tree_entry, rb_node); + } + if (!node) { + write_unlock(&tree->lock); + return 0; + } + + *root_objectid = entry->root_objectid; + *objectid = entry->objectid; + write_unlock(&tree->lock); + return 1; +} + +int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, + u64 *root_objectid, u64 *objectid) +{ + struct tree_entry *entry; + struct rb_node *node; + + write_lock(&tree->lock); + node = tree_search(&tree->tree, *root_objectid, *objectid); + if (!node) { + write_unlock(&tree->lock); + return 0; + } + + entry = rb_entry(node, struct tree_entry, rb_node); + while(comp_entry(entry, *root_objectid, *objectid) >= 0) { + node = rb_next(node); + if (!node) + break; + entry = rb_entry(node, struct tree_entry, rb_node); + } + if (!node) { + write_unlock(&tree->lock); + return 0; + } + + *root_objectid = entry->root_objectid; + *objectid = entry->objectid; + rb_erase(node, &tree->tree); + write_unlock(&tree->lock); + kfree(entry); + return 1; +} -- cgit v1.2.3-70-g09d2 From cee36a03e8f7c6e14aefd497d3acf01bcd3ef153 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 15 Jan 2008 08:40:48 -0500 Subject: Rework btrfs_drop_inode to avoid scheduling Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 1 + fs/btrfs/inode.c | 13 +++---------- fs/btrfs/ordered-data.c | 36 ++++++++++++++++++++++++++++++++++++ fs/btrfs/ordered-data.h | 1 + fs/btrfs/transaction.c | 2 ++ 6 files changed, 44 insertions(+), 10 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a2c2d6d82c5..1e19f2d8633 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -349,6 +349,7 @@ struct btrfs_fs_info { u64 total_pinned; spinlock_t delalloc_lock; + spinlock_t new_trans_lock; u64 delalloc_bytes; }; /* diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 87daed632f3..cd29922d407 100644 --- 
a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -639,6 +639,7 @@ struct btrfs_root *open_ctree(struct super_block *sb) INIT_LIST_HEAD(&fs_info->hashers); spin_lock_init(&fs_info->hash_lock); spin_lock_init(&fs_info->delalloc_lock); + spin_lock_init(&fs_info->new_trans_lock); memset(&fs_info->super_kobj, 0, sizeof(fs_info->super_kobj)); init_completion(&fs_info->kobj_unregister); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e53d2033164..008e3445748 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -993,20 +993,13 @@ fail: void btrfs_drop_inode(struct inode *inode) { - struct btrfs_trans_handle *trans; - struct btrfs_root *root = BTRFS_I(inode)->root; - - if (!BTRFS_I(inode)->ordered_trans) { + if (!BTRFS_I(inode)->ordered_trans || inode->i_nlink) { generic_drop_inode(inode); return; } - /* nasty, but it prevents a deadlock with data=ordered by preventing - * a commit until after this inode is done - */ - trans = btrfs_start_transaction(root, 1); + /* FIXME, make sure this delete actually ends up in the transaction */ + btrfs_del_ordered_inode(inode); generic_drop_inode(inode); - /* note, the inode is now untouchable */ - btrfs_end_transaction(trans, root); } void btrfs_delete_inode(struct inode *inode) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 411aba84d30..b56011baa17 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -219,3 +219,39 @@ int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, kfree(entry); return 1; } + +static int __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, + u64 root_objectid, u64 objectid) +{ + struct tree_entry *entry; + struct rb_node *node; + struct rb_node *prev; + + write_lock(&tree->lock); + node = __tree_search(&tree->tree, root_objectid, objectid, &prev); + if (!node) { + write_unlock(&tree->lock); + return 0; + } + rb_erase(node, &tree->tree); + write_unlock(&tree->lock); + entry = rb_entry(node, struct tree_entry, rb_node); + kfree(entry); + return 1; +} + +int btrfs_del_ordered_inode(struct inode *inode) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + u64 root_objectid = root->root_key.objectid; + + spin_lock(&root->fs_info->new_trans_lock); + if (root->fs_info->running_transaction) { + struct btrfs_ordered_inode_tree *tree; + tree = &root->fs_info->running_transaction->ordered_inode_tree; + __btrfs_del_ordered_inode(tree, root_objectid, inode->i_ino); + } + spin_unlock(&root->fs_info->new_trans_lock); + return 0; +} + diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index aaf9eb14271..26b26212865 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -36,4 +36,5 @@ int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, u64 *root_objectid, u64 *objectid); int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, u64 *root_objectid, u64 *objectid); +int btrfs_del_ordered_inode(struct inode *inode); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 614903f5c88..a3205808ab2 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -699,7 +699,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, BUG_ON(ret); cur_trans = root->fs_info->running_transaction; + spin_lock(&root->fs_info->new_trans_lock); root->fs_info->running_transaction = NULL; + spin_unlock(&root->fs_info->new_trans_lock); btrfs_set_super_generation(&root->fs_info->super_copy, cur_trans->transid); btrfs_set_super_root(&root->fs_info->super_copy, -- cgit v1.2.3-70-g09d2 From 
2da98f003f4788b0a72c5f87bc55b061f65f30fa Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 16 Jan 2008 11:44:43 -0500 Subject: Btrfs: Run igrab on data=ordered inodes to prevent deadlocks during writeout Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 +- fs/btrfs/inode.c | 32 ++++++++++++++++++++++++++------ fs/btrfs/ordered-data.c | 10 ++++++++-- fs/btrfs/super.c | 2 +- fs/btrfs/transaction.c | 1 + 5 files changed, 37 insertions(+), 10 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 1e19f2d8633..fa65fe027e4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1145,7 +1145,7 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page); int btrfs_readpage(struct file *file, struct page *page); void btrfs_delete_inode(struct inode *inode); -void btrfs_drop_inode(struct inode *inode); +void btrfs_put_inode(struct inode *inode); void btrfs_read_locked_inode(struct inode *inode); int btrfs_write_inode(struct inode *inode, int wait); void btrfs_dirty_inode(struct inode *inode); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f83f88ca8ac..c1ac0bcbb46 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -601,6 +601,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) { struct btrfs_root *root; struct btrfs_trans_handle *trans; + struct inode *inode = dentry->d_inode; int ret; unsigned long nr = 0; @@ -617,6 +618,18 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) ret = btrfs_unlink_trans(trans, root, dir, dentry); nr = trans->blocks_used; + if (inode->i_nlink == 0) { + int found; + /* if the inode isn't linked anywhere, + * we don't need to worry about + * data=ordered + */ + found = btrfs_del_ordered_inode(inode); + if (found == 1) { + atomic_dec(&inode->i_count); + } + } + btrfs_end_transaction(trans, root); fail: mutex_unlock(&root->fs_info->fs_mutex); @@ -993,15 +1006,22 @@ fail: return err; } -void btrfs_drop_inode(struct inode *inode) +void btrfs_put_inode(struct inode *inode) { - if (!BTRFS_I(inode)->ordered_trans || inode->i_nlink) { - generic_drop_inode(inode); + int ret; + + if (!BTRFS_I(inode)->ordered_trans) { + return; + } + + if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) || + mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) return; + + ret = btrfs_del_ordered_inode(inode); + if (ret == 1) { + atomic_dec(&inode->i_count); } - /* FIXME, make sure this delete actually ends up in the transaction */ - btrfs_del_ordered_inode(inode); - generic_drop_inode(inode); } void btrfs_delete_inode(struct inode *inode) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index b56011baa17..cba2b623d02 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -153,6 +153,8 @@ int btrfs_add_ordered_inode(struct inode *inode) write_unlock(&tree->lock); if (node) kfree(entry); + else + igrab(inode); return 0; } @@ -221,6 +223,7 @@ int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, } static int __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, + struct inode *inode, u64 root_objectid, u64 objectid) { struct tree_entry *entry; @@ -234,6 +237,7 @@ static int __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, return 0; } rb_erase(node, &tree->tree); + BTRFS_I(inode)->ordered_trans = 0; write_unlock(&tree->lock); entry = rb_entry(node, struct tree_entry, rb_node); kfree(entry); @@ -244,14 +248,16 @@ int 
btrfs_del_ordered_inode(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 root_objectid = root->root_key.objectid; + int ret = 0; spin_lock(&root->fs_info->new_trans_lock); if (root->fs_info->running_transaction) { struct btrfs_ordered_inode_tree *tree; tree = &root->fs_info->running_transaction->ordered_inode_tree; - __btrfs_del_ordered_inode(tree, root_objectid, inode->i_ino); + ret = __btrfs_del_ordered_inode(tree, inode, root_objectid, + inode->i_ino); } spin_unlock(&root->fs_info->new_trans_lock); - return 0; + return ret; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 4deea393ca9..e506de3168b 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -425,7 +425,7 @@ static struct file_system_type btrfs_fs_type = { static struct super_operations btrfs_super_ops = { .delete_inode = btrfs_delete_inode, - .drop_inode = btrfs_drop_inode, + .put_inode = btrfs_put_inode, .put_super = btrfs_put_super, .read_inode = btrfs_read_locked_inode, .write_super = btrfs_write_super, diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index a3205808ab2..08f7a188dc3 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -521,6 +521,7 @@ int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, if (inode) { if (S_ISREG(inode->i_mode)) filemap_write_and_wait(inode->i_mapping); + atomic_dec(&inode->i_count); iput(inode); } mutex_lock(&root->fs_info->fs_mutex); -- cgit v1.2.3-70-g09d2 From 4d5e74bc0aec3f54b7e429d77b7c35de042c507d Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 16 Jan 2008 16:09:22 -0500 Subject: Btrfs: Fix data=ordered vs wait_on_inode deadlock on older kernels Using ilookup5 during data=ordered writeback could deadlock on I_LOCK. This saves a pointer to the inode instead. Signed-off-by: Chris Mason --- fs/btrfs/ordered-data.c | 12 ++++++++++-- fs/btrfs/ordered-data.h | 6 ++++-- fs/btrfs/transaction.c | 30 +++++++++++++----------------- 3 files changed, 27 insertions(+), 21 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index cba2b623d02..3ee51e10c18 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -25,6 +25,7 @@ struct tree_entry { u64 root_objectid; u64 objectid; + struct inode *inode; struct rb_node rb_node; }; @@ -144,6 +145,7 @@ int btrfs_add_ordered_inode(struct inode *inode) write_lock(&tree->lock); entry->objectid = inode->i_ino; entry->root_objectid = root_objectid; + entry->inode = inode; node = tree_insert(&tree->tree, root_objectid, inode->i_ino, &entry->rb_node); @@ -159,7 +161,8 @@ int btrfs_add_ordered_inode(struct inode *inode) } int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid) + u64 *root_objectid, u64 *objectid, + struct inode **inode) { struct tree_entry *entry; struct rb_node *node; @@ -184,13 +187,16 @@ int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, } *root_objectid = entry->root_objectid; + *inode = entry->inode; + atomic_inc(&entry->inode->i_count); *objectid = entry->objectid; write_unlock(&tree->lock); return 1; } int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid) + u64 *root_objectid, u64 *objectid, + struct inode **inode) { struct tree_entry *entry; struct rb_node *node; @@ -216,6 +222,8 @@ int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, *root_objectid = entry->root_objectid; *objectid = entry->objectid; + *inode = entry->inode; + 
atomic_inc(&entry->inode->i_count); rb_erase(node, &tree->tree); write_unlock(&tree->lock); kfree(entry); diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 26b26212865..f25c6771ec6 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -33,8 +33,10 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t) int btrfs_add_ordered_inode(struct inode *inode); int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid); + u64 *root_objectid, u64 *objectid, + struct inode **inode); int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid); + u64 *root_objectid, u64 *objectid, + struct inode **inode); int btrfs_del_ordered_inode(struct inode *inode); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 08f7a188dc3..b6bbfc179c2 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -490,19 +490,17 @@ int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, while(1) { ret = btrfs_find_first_ordered_inode( &cur_trans->ordered_inode_tree, - &root_objectid, &objectid); + &root_objectid, &objectid, &inode); if (!ret) break; mutex_unlock(&root->fs_info->trans_mutex); mutex_unlock(&root->fs_info->fs_mutex); - inode = btrfs_ilookup(root->fs_info->sb, objectid, - root_objectid); - if (inode) { - if (S_ISREG(inode->i_mode)) - filemap_fdatawrite(inode->i_mapping); - iput(inode); - } + + if (S_ISREG(inode->i_mode)) + filemap_fdatawrite(inode->i_mapping); + iput(inode); + mutex_lock(&root->fs_info->fs_mutex); mutex_lock(&root->fs_info->trans_mutex); } @@ -511,19 +509,17 @@ int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, objectid = 0; ret = btrfs_find_del_first_ordered_inode( &cur_trans->ordered_inode_tree, - &root_objectid, &objectid); + &root_objectid, &objectid, &inode); if (!ret) break; mutex_unlock(&root->fs_info->trans_mutex); mutex_unlock(&root->fs_info->fs_mutex); - inode = btrfs_ilookup(root->fs_info->sb, objectid, - root_objectid); - if (inode) { - if (S_ISREG(inode->i_mode)) - filemap_write_and_wait(inode->i_mapping); - atomic_dec(&inode->i_count); - iput(inode); - } + + if (S_ISREG(inode->i_mode)) + filemap_write_and_wait(inode->i_mapping); + atomic_dec(&inode->i_count); + iput(inode); + mutex_lock(&root->fs_info->fs_mutex); mutex_lock(&root->fs_info->trans_mutex); } -- cgit v1.2.3-70-g09d2 From 81d7ed29ff6bdec903c36c26b386e16c014993b2 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 25 Apr 2008 08:51:48 -0400 Subject: Btrfs: Throttle file_write when data=ordered is flushing the inode Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 1 + fs/btrfs/file.c | 1 + fs/btrfs/inode.c | 4 ++++ fs/btrfs/ordered-data.c | 13 +++++++++++++ fs/btrfs/ordered-data.h | 1 + fs/btrfs/transaction.c | 10 ++++++++-- 6 files changed, 28 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index fe6ef8e3416..5ba83894c8b 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -31,6 +31,7 @@ struct btrfs_inode { struct extent_io_tree io_tree; struct extent_io_tree io_failure_tree; struct inode vfs_inode; + atomic_t ordered_writeback; u64 ordered_trans; /* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 5d537f26dc8..8effdf4f5d6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -977,6 +977,7 @@ out_nolock: (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); } current->backing_dev_info = NULL; + 
btrfs_ordered_throttle(root, inode); return num_written ? num_written : err; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d58b4d5a1fe..b31f52d4f2c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1419,6 +1419,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) inode->i_mapping, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); + atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); return 0; } @@ -1728,6 +1729,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode->i_mapping, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); + atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->root = root; @@ -1956,6 +1958,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); BTRFS_I(inode)->delalloc_bytes = 0; + atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } dir->i_sb->s_dirt = 1; @@ -3292,6 +3295,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); BTRFS_I(inode)->delalloc_bytes = 0; + atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } dir->i_sb->s_dirt = 1; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 3ee51e10c18..b474902c90e 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -269,3 +269,16 @@ int btrfs_del_ordered_inode(struct inode *inode) return ret; } +int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode) +{ + struct btrfs_transaction *cur = root->fs_info->running_transaction; + while(cur == root->fs_info->running_transaction && + atomic_read(&BTRFS_I(inode)->ordered_writeback)) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) + congestion_wait(WRITE, HZ/20); +#else + blk_congestion_wait(WRITE, HZ/20); +#endif + } + return 0; +} diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index f25c6771ec6..29047e0abaa 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -39,4 +39,5 @@ int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, u64 *root_objectid, u64 *objectid, struct inode **inode); int btrfs_del_ordered_inode(struct inode *inode); +int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1ed179c020a..c85cb48d95e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -512,8 +512,11 @@ int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, mutex_unlock(&root->fs_info->trans_mutex); mutex_unlock(&root->fs_info->fs_mutex); - if (S_ISREG(inode->i_mode)) + if (S_ISREG(inode->i_mode)) { + atomic_inc(&BTRFS_I(inode)->ordered_writeback); filemap_fdatawrite(inode->i_mapping); + atomic_dec(&BTRFS_I(inode)->ordered_writeback); + } iput(inode); mutex_lock(&root->fs_info->fs_mutex); @@ -530,8 +533,11 @@ int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, mutex_unlock(&root->fs_info->trans_mutex); mutex_unlock(&root->fs_info->fs_mutex); - if (S_ISREG(inode->i_mode)) + if (S_ISREG(inode->i_mode)) { + atomic_inc(&BTRFS_I(inode)->ordered_writeback); filemap_write_and_wait(inode->i_mapping); + atomic_dec(&BTRFS_I(inode)->ordered_writeback); + } atomic_dec(&inode->i_count); 
iput(inode); -- cgit v1.2.3-70-g09d2 From d6bfde8765668c8687de72f7a40f52acdf4f2f19 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 30 Apr 2008 13:59:35 -0400 Subject: Btrfs: Fixes for 2.6.18 enterprise kernels 2.6.18 seems to get caught in an infinite loop when cancel_rearming_delayed_workqueue is called more than once, so this switches to cancel_delayed_work, which is arguably more correct. Also, balance_dirty_pages can run into problems with 2.6.18 based kernels because it doesn't have the per-bdi dirty limits. This avoids calling balance_dirty_pages on the btree inode unless there is actually something to balance, which is a good optimization in general. Finally there's a compile fix for ordered-data.h Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 24 +++++++++++++++++++----- fs/btrfs/ordered-data.c | 1 + fs/btrfs/transaction.c | 8 ++++++-- 3 files changed, 26 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e35e70165b5..fabc31b334b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1548,6 +1548,7 @@ int close_ctree(struct btrfs_root *root) btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); + write_ctree_super(NULL, root); mutex_unlock(&fs_info->fs_mutex); @@ -1583,17 +1584,17 @@ int close_ctree(struct btrfs_root *root) extent_io_tree_empty_lru(&fs_info->extent_ins); extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree); - flush_workqueue(end_io_workqueue); flush_workqueue(async_submit_workqueue); + flush_workqueue(end_io_workqueue); truncate_inode_pages(fs_info->btree_inode->i_mapping, 0); - flush_workqueue(end_io_workqueue); - destroy_workqueue(end_io_workqueue); - flush_workqueue(async_submit_workqueue); destroy_workqueue(async_submit_workqueue); + flush_workqueue(end_io_workqueue); + destroy_workqueue(end_io_workqueue); + iput(fs_info->btree_inode); #if 0 while(!list_empty(&fs_info->hashers)) { @@ -1663,8 +1664,21 @@ void btrfs_throttle(struct btrfs_root *root) void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) { - balance_dirty_pages_ratelimited_nr( + struct extent_io_tree *tree; + u64 num_dirty; + u64 start = 0; + unsigned long thresh = 16 * 1024 * 1024; + tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; + + if (current_is_pdflush()) + return; + + num_dirty = count_range_bits(tree, &start, (u64)-1, + thresh, EXTENT_DIRTY); + if (num_dirty > thresh) { + balance_dirty_pages_ratelimited_nr( root->fs_info->btree_inode->i_mapping, 1); + } } void btrfs_set_buffer_defrag(struct extent_buffer *buf) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index b474902c90e..3814cb0246b 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -18,6 +18,7 @@ #include #include +#include #include "ctree.h" #include "transaction.h" #include "btrfs_inode.h" diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c85cb48d95e..9826942fa18 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -814,6 +814,9 @@ void btrfs_transaction_cleaner(struct work_struct *work) int ret; mutex_lock(&root->fs_info->fs_mutex); + if (root->fs_info->closing) + goto out; + mutex_lock(&root->fs_info->trans_mutex); cur = root->fs_info->running_transaction; if (!cur) { @@ -838,12 +841,13 @@ out: void btrfs_transaction_queue_work(struct btrfs_root *root, int delay) { - queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay); + if (!root->fs_info->closing) + 
queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay); } void btrfs_transaction_flush_work(struct btrfs_root *root) { - cancel_rearming_delayed_workqueue(trans_wq, &root->fs_info->trans_work); + cancel_delayed_work(&root->fs_info->trans_work); flush_workqueue(trans_wq); } -- cgit v1.2.3-70-g09d2 From e1b81e6761bd8419146d4bbe0aadd8b2d348a01c Mon Sep 17 00:00:00 2001 From: Mingming Date: Tue, 27 May 2008 10:55:43 -0400 Subject: btrfs delete ordered inode handling fix Use btrfs_release_file instead of a put_inode call Signed-off-by: Chris Mason --- fs/btrfs/file.c | 7 +++++++ fs/btrfs/inode.c | 24 +----------------------- fs/btrfs/ordered-data.c | 21 ++++++++++++++------- fs/btrfs/ordered-data.h | 2 +- fs/btrfs/super.c | 1 - 5 files changed, 23 insertions(+), 32 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index d968900c697..c4fa4664a45 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -978,6 +978,12 @@ out_nolock: return num_written ? num_written : err; } +static int btrfs_release_file (struct inode * inode, struct file * filp) +{ + btrfs_del_ordered_inode(inode); + return 0; +} + static int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) { @@ -1044,6 +1050,7 @@ struct file_operations btrfs_file_operations = { .write = btrfs_file_write, .mmap = btrfs_file_mmap, .open = generic_file_open, + .release = btrfs_release_file, .fsync = btrfs_sync_file, .unlocked_ioctl = btrfs_ioctl, #ifdef CONFIG_COMPAT diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 40b4a8ec17f..1569fb86451 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -857,15 +857,11 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) nr = trans->blocks_used; if (inode->i_nlink == 0) { - int found; /* if the inode isn't linked anywhere, * we don't need to worry about * data=ordered */ - found = btrfs_del_ordered_inode(inode); - if (found == 1) { - atomic_dec(&inode->i_count); - } + btrfs_del_ordered_inode(inode); } btrfs_end_transaction(trans, root); @@ -1271,24 +1267,6 @@ fail: return err; } -void btrfs_put_inode(struct inode *inode) -{ - int ret; - - if (!BTRFS_I(inode)->ordered_trans) { - return; - } - - if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) || - mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) - return; - - ret = btrfs_del_ordered_inode(inode); - if (ret == 1) { - atomic_dec(&inode->i_count); - } -} - void btrfs_delete_inode(struct inode *inode) { struct btrfs_trans_handle *trans; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 3814cb0246b..8dd8180183f 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -231,7 +231,7 @@ int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, return 1; } -static int __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, +static void __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, struct inode *inode, u64 root_objectid, u64 objectid) { @@ -243,31 +243,38 @@ static int __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, node = __tree_search(&tree->tree, root_objectid, objectid, &prev); if (!node) { write_unlock(&tree->lock); - return 0; + return; } rb_erase(node, &tree->tree); BTRFS_I(inode)->ordered_trans = 0; write_unlock(&tree->lock); + atomic_dec(&inode->i_count); entry = rb_entry(node, struct tree_entry, rb_node); kfree(entry); - return 1; + return; } -int btrfs_del_ordered_inode(struct inode *inode) +void btrfs_del_ordered_inode(struct inode *inode) { 
struct btrfs_root *root = BTRFS_I(inode)->root; u64 root_objectid = root->root_key.objectid; - int ret = 0; + + if (!BTRFS_I(inode)->ordered_trans) { + return; + } + + if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) || + mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) + return; spin_lock(&root->fs_info->new_trans_lock); if (root->fs_info->running_transaction) { struct btrfs_ordered_inode_tree *tree; tree = &root->fs_info->running_transaction->ordered_inode_tree; - ret = __btrfs_del_ordered_inode(tree, inode, root_objectid, + __btrfs_del_ordered_inode(tree, inode, root_objectid, inode->i_ino); } spin_unlock(&root->fs_info->new_trans_lock); - return ret; } int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode) diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 29047e0abaa..c515c4b3999 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -38,6 +38,6 @@ int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, u64 *root_objectid, u64 *objectid, struct inode **inode); -int btrfs_del_ordered_inode(struct inode *inode); +void btrfs_del_ordered_inode(struct inode *inode); int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode); #endif diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 273a5b511f5..05029656e42 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -487,7 +487,6 @@ static void btrfs_unlockfs(struct super_block *sb) static struct super_operations btrfs_super_ops = { .delete_inode = btrfs_delete_inode, - .put_inode = btrfs_put_inode, .put_super = btrfs_put_super, .write_super = btrfs_write_super, .sync_fs = btrfs_sync_fs, -- cgit v1.2.3-70-g09d2 From 594a24eb0e7fa8413f8b443863be4b7c72bfde9f Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 25 Jun 2008 16:01:30 -0400 Subject: Fix btrfs_del_ordered_inode to allow forcing the drop during unlinks This allows us to delete an unlinked inode with dirty pages from the list instead of forcing commit to write these out before deleting the inode. 
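The new force flag lets the unlink path drop an inode from the ordered tree even while it still has dirty or writeback pages, since the data of an unlinked inode no longer needs to reach disk before the commit. A minimal sketch of the decision logic this ends up implementing (illustrative only; the helper name is invented, and in the patch itself the checks live inline in btrfs_del_ordered_inode()):

/*
 * Sketch: condenses the checks btrfs_del_ordered_inode() performs after
 * this patch.  force == 1 comes only from the unlink path.
 */
static int ordered_inode_droppable(struct inode *inode, int force)
{
        if (!BTRFS_I(inode)->ordered_trans)
                return 0;       /* not tracked by the running transaction */
        if (force)
                return 1;       /* unlinked: drop even with dirty pages */
        if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) ||
            mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
                return 0;       /* data still in flight; keep it tracked */
        return 1;
}
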
Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 18 +++++++++++++----- fs/btrfs/file.c | 2 +- fs/btrfs/inode.c | 4 +--- fs/btrfs/ordered-data.c | 6 +++--- fs/btrfs/ordered-data.h | 2 +- 5 files changed, 19 insertions(+), 13 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 1b756fae279..9601241e552 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1245,7 +1245,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root int level; int should_reada = p->reada; int lowest_unlock = 1; + int blocksize; u8 lowest_level = 0; + u64 blocknr; + u64 gen; lowest_level = p->lowest_level; WARN_ON(lowest_level && ins_len); @@ -1320,11 +1323,12 @@ again: reada_for_search(root, p, level, slot, key->objectid); - tmp = btrfs_find_tree_block(root, - btrfs_node_blockptr(b, slot), - btrfs_level_size(root, level - 1)); - if (tmp && btrfs_buffer_uptodate(tmp, - btrfs_node_ptr_generation(b, slot))) { + blocknr = btrfs_node_blockptr(b, slot); + gen = btrfs_node_ptr_generation(b, slot); + blocksize = btrfs_level_size(root, level - 1); + + tmp = btrfs_find_tree_block(root, blocknr, blocksize); + if (tmp && btrfs_buffer_uptodate(tmp, gen)) { b = tmp; } else { /* @@ -1334,6 +1338,10 @@ again: */ if (level > 1) { btrfs_release_path(NULL, p); + if (tmp) + free_extent_buffer(tmp); + tmp = read_tree_block(root, blocknr, + blocksize, gen); if (tmp) free_extent_buffer(tmp); goto again; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 18bbe108a0e..b7f8f92daf8 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -976,7 +976,7 @@ out_nolock: int btrfs_release_file(struct inode * inode, struct file * filp) { - btrfs_del_ordered_inode(inode); + btrfs_del_ordered_inode(inode, 0); if (filp->private_data) btrfs_ioctl_trans_end(filp); return 0; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b2251e27ac8..cf27b598462 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -852,7 +852,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) * we don't need to worry about * data=ordered */ - btrfs_del_ordered_inode(inode); + btrfs_del_ordered_inode(inode, 1); } btrfs_end_transaction(trans, root); @@ -1276,14 +1276,12 @@ void btrfs_delete_inode(struct inode *inode) btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); - btrfs_throttle(root); return; no_delete_lock: nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); - btrfs_throttle(root); no_delete: clear_inode(inode); } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 8dd8180183f..5e4c0d95ce4 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -254,7 +254,7 @@ static void __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, return; } -void btrfs_del_ordered_inode(struct inode *inode) +void btrfs_del_ordered_inode(struct inode *inode, int force) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 root_objectid = root->root_key.objectid; @@ -263,8 +263,8 @@ void btrfs_del_ordered_inode(struct inode *inode) return; } - if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) || - mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) + if (!force && (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) || + mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) return; spin_lock(&root->fs_info->new_trans_lock); diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index c515c4b3999..4fa78736423 100644 --- a/fs/btrfs/ordered-data.h +++ 
b/fs/btrfs/ordered-data.h @@ -38,6 +38,6 @@ int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, u64 *root_objectid, u64 *objectid, struct inode **inode); -void btrfs_del_ordered_inode(struct inode *inode); +void btrfs_del_ordered_inode(struct inode *inode, int force); int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode); #endif -- cgit v1.2.3-70-g09d2 From 1b1e2135dc1e4efbcf25ac9ac9979316d4e1193e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 25 Jun 2008 16:01:31 -0400 Subject: Btrfs: Add a per-inode csum mutex to avoid races creating csum items Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 1 + fs/btrfs/file.c | 6 +++--- fs/btrfs/inode.c | 6 ++++++ fs/btrfs/ordered-data.c | 5 +++-- fs/btrfs/transaction.c | 2 +- fs/btrfs/tree-defrag.c | 7 +++++++ 6 files changed, 21 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 5ba83894c8b..40b4e0c9cd0 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -30,6 +30,7 @@ struct btrfs_inode { struct extent_map_tree extent_tree; struct extent_io_tree io_tree; struct extent_io_tree io_failure_tree; + struct mutex csum_mutex; struct inode vfs_inode; atomic_t ordered_writeback; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index ece221cba90..8037792f878 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -267,13 +267,13 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, /* FIXME...EIEIO, ENOSPC and more */ /* insert any holes we need to create */ - if (isize < end_pos) { + if (isize < start_pos) { u64 last_pos_in_file; u64 hole_size; u64 mask = root->sectorsize - 1; last_pos_in_file = (isize + mask) & ~mask; - hole_size = (end_pos - last_pos_in_file + mask) & ~mask; - if (last_pos_in_file < end_pos) { + hole_size = (start_pos - last_pos_in_file + mask) & ~mask; + if (last_pos_in_file < start_pos) { err = btrfs_drop_extents(trans, root, inode, last_pos_in_file, last_pos_in_file + hole_size, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bbba3350d02..d39433dfb2c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -351,7 +351,9 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); + mutex_lock(&BTRFS_I(inode)->csum_mutex); btrfs_csum_file_blocks(trans, root, inode, bio, sums); + mutex_unlock(&BTRFS_I(inode)->csum_mutex); ret = btrfs_end_transaction(trans, root); BUG_ON(ret); @@ -1400,6 +1402,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) inode->i_mapping, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); + mutex_init(&BTRFS_I(inode)->csum_mutex); atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); return 0; } @@ -1701,6 +1704,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode->i_mapping, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); + mutex_init(&BTRFS_I(inode)->csum_mutex); atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->root = root; @@ -1924,6 +1928,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_mapping, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); + mutex_init(&BTRFS_I(inode)->csum_mutex); 
BTRFS_I(inode)->delalloc_bytes = 0; atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; @@ -2862,6 +2867,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_mapping, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); + mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 5e4c0d95ce4..254da822566 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -152,12 +152,13 @@ int btrfs_add_ordered_inode(struct inode *inode) inode->i_ino, &entry->rb_node); BTRFS_I(inode)->ordered_trans = transid; + if (!node) + igrab(inode); write_unlock(&tree->lock); + if (node) kfree(entry); - else - igrab(inode); return 0; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 0c53ff775b9..8e909cb97c6 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -477,7 +477,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, if (err) ret = err; nr = trans->blocks_used; - ret = btrfs_end_transaction(trans, tree_root); + ret = btrfs_end_transaction_throttle(trans, tree_root); BUG_ON(ret); mutex_unlock(&root->fs_info->drop_mutex); diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 1677e4edaf6..b17693f61fb 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -38,8 +38,15 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, u64 last_ret = 0; if (root->fs_info->extent_root == root) { + /* + * there's recursion here right now in the tree locking, + * we can't defrag the extent root without deadlock + */ + goto out; +#if 0 mutex_lock(&root->fs_info->alloc_mutex); is_extent = 1; +#endif } if (root->ref_cows == 0 && !is_extent) -- cgit v1.2.3-70-g09d2 From e6dcd2dc9c489108648e2ed543315dd134d50a9a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 17 Jul 2008 12:53:50 -0400 Subject: Btrfs: New data=ordered implementation The old data=ordered code would force commit to wait until all the data extents from the transaction were fully on disk. This introduced large latencies into the commit and stalled new writers in the transaction for a long time. The new code changes the way data allocations and extents work: * When delayed allocation is filled, data extents are reserved, and the extent bit EXTENT_ORDERED is set on the entire range of the extent. A struct btrfs_ordered_extent is allocated and inserted into a per-inode rbtree to track the pending extents. * As each page is written EXTENT_ORDERED is cleared on the bytes corresponding to that page. * When all of the bytes corresponding to a single struct btrfs_ordered_extent are written, the previously reserved extent is inserted into the FS btree and into the extent allocation trees. The checksums for the file data are also updated. 
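To make the three steps above concrete, here is a toy user-space model of the ordered-extent lifecycle (all names are invented for illustration; the kernel tracks this with struct btrfs_ordered_extent in a per-inode rbtree and the EXTENT_ORDERED bit in the extent_io tree):

#include <stdio.h>
#include <stdlib.h>

#define SECTOR 4096

struct toy_ordered_extent {
        unsigned long start;            /* byte offset of the reserved extent */
        unsigned long len;              /* length in bytes */
        unsigned long sectors_left;     /* sectors still flagged "ordered" */
};

/* step 1: delalloc filled -> reserve an extent, flag its whole range */
static struct toy_ordered_extent *reserve(unsigned long start,
                                          unsigned long len)
{
        struct toy_ordered_extent *oe = malloc(sizeof(*oe));
        oe->start = start;
        oe->len = len;
        oe->sectors_left = len / SECTOR;
        return oe;
}

/* step 2: a page reaches disk -> clear the flag on its bytes;
 * step 3: once nothing is left, the reserved extent (and its checksums)
 * can go into the FS btree and the extent allocation trees.
 */
static int page_written(struct toy_ordered_extent *oe)
{
        if (--oe->sectors_left == 0) {
                printf("extent [%lu, %lu) complete: insert into btree\n",
                       oe->start, oe->start + oe->len);
                return 1;
        }
        return 0;
}

int main(void)
{
        struct toy_ordered_extent *oe = reserve(0, 4 * SECTOR);

        while (!page_written(oe))
                ;
        free(oe);
        return 0;
}
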
Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 4 +- fs/btrfs/ctree.h | 19 +- fs/btrfs/disk-io.c | 13 +- fs/btrfs/extent-tree.c | 132 +++++++++----- fs/btrfs/extent_io.c | 52 +++++- fs/btrfs/extent_io.h | 14 +- fs/btrfs/extent_map.c | 5 +- fs/btrfs/file-item.c | 62 ++++--- fs/btrfs/file.c | 67 ++++--- fs/btrfs/inode.c | 447 ++++++++++++++++++++++++++++++----------------- fs/btrfs/ordered-data.c | 455 ++++++++++++++++++++++++++++++------------------ fs/btrfs/ordered-data.h | 71 ++++++-- fs/btrfs/transaction.c | 67 +------ fs/btrfs/transaction.h | 4 - 14 files changed, 910 insertions(+), 502 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 40b4e0c9cd0..8d03687510e 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -21,6 +21,7 @@ #include "extent_map.h" #include "extent_io.h" +#include "ordered-data.h" /* in memory btrfs inode */ struct btrfs_inode { @@ -32,9 +33,8 @@ struct btrfs_inode { struct extent_io_tree io_failure_tree; struct mutex csum_mutex; struct inode vfs_inode; - atomic_t ordered_writeback; + struct btrfs_ordered_inode_tree ordered_tree; - u64 ordered_trans; /* * transid of the trans_handle that last modified this inode */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f3783dbd9b6..ceebc052ddc 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include "bit-radix.h" #include "extent_io.h" @@ -37,6 +38,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_transaction_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; +struct btrfs_ordered_sum; #define BTRFS_MAGIC "_B5RfS_M" @@ -510,6 +512,7 @@ struct btrfs_fs_info { u64 max_inline; u64 alloc_start; struct btrfs_transaction *running_transaction; + wait_queue_head_t transaction_throttle; struct btrfs_super_block super_copy; struct btrfs_super_block super_for_commit; struct block_device *__bdev; @@ -541,6 +544,7 @@ struct btrfs_fs_info { */ struct btrfs_workers workers; struct btrfs_workers endio_workers; + struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; struct task_struct *transaction_kthread; struct task_struct *cleaner_kthread; @@ -1384,6 +1388,17 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, u64 owner, u64 owner_offset, u64 empty_size, u64 hint_byte, u64 search_end, struct btrfs_key *ins, u64 data); +int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset, + struct btrfs_key *ins); +int btrfs_reserve_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 num_bytes, u64 min_alloc_size, + u64 empty_size, u64 hint_byte, + u64 search_end, struct btrfs_key *ins, + u64 data); int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf); int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root @@ -1556,9 +1571,9 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, u64 bytenr, int mod); int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, - struct bio *bio, char *sums); + struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, - struct bio *bio, char **sums_ret); + struct bio *bio, struct btrfs_ordered_sum **sums_ret); struct btrfs_csum_item *btrfs_lookup_csum(struct 
btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b01b3f4f92a..4a5ebafb935 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -407,7 +407,11 @@ static int end_workqueue_bio(struct bio *bio, end_io_wq->error = err; end_io_wq->work.func = end_workqueue_fn; end_io_wq->work.flags = 0; - btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); + if (bio->bi_rw & (1 << BIO_RW)) + btrfs_queue_worker(&fs_info->endio_write_workers, + &end_io_wq->work); + else + btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) return 0; @@ -1286,6 +1290,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, mutex_init(&fs_info->transaction_kthread_mutex); mutex_init(&fs_info->cleaner_mutex); mutex_init(&fs_info->volume_mutex); + init_waitqueue_head(&fs_info->transaction_throttle); #if 0 ret = add_hasher(fs_info, "crc32c"); @@ -1325,9 +1330,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size); btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size); btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->endio_write_workers, + fs_info->thread_pool_size); btrfs_start_workers(&fs_info->workers, 1); btrfs_start_workers(&fs_info->submit_workers, 1); btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); + btrfs_start_workers(&fs_info->endio_write_workers, + fs_info->thread_pool_size); err = -EINVAL; if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) { @@ -1447,6 +1456,7 @@ fail_sb_buffer: extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); fail_iput: iput(fs_info->btree_inode); @@ -1702,6 +1712,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); iput(fs_info->btree_inode); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8ebfa6be079..343d1101c31 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1895,36 +1895,17 @@ error: return ret; } -/* - * finds a free extent and does all the dirty work required for allocation - * returns the key for the extent through ins, and a tree buffer for - * the first block of the extent through buf. - * - * returns 0 if everything worked, non-zero otherwise. 
- */ -int btrfs_alloc_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 num_bytes, u64 min_alloc_size, - u64 root_objectid, u64 ref_generation, - u64 owner, u64 owner_offset, - u64 empty_size, u64 hint_byte, - u64 search_end, struct btrfs_key *ins, u64 data) +static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 num_bytes, u64 min_alloc_size, + u64 empty_size, u64 hint_byte, + u64 search_end, struct btrfs_key *ins, + u64 data) { int ret; - int pending_ret; - u64 super_used; - u64 root_used; u64 search_start = 0; u64 alloc_profile; - u32 sizes[2]; struct btrfs_fs_info *info = root->fs_info; - struct btrfs_root *extent_root = info->extent_root; - struct btrfs_extent_item *extent_item; - struct btrfs_extent_ref *ref; - struct btrfs_path *path; - struct btrfs_key keys[2]; - - maybe_lock_mutex(root); if (data) { alloc_profile = info->avail_data_alloc_bits & @@ -1974,11 +1955,48 @@ again: } if (ret) { printk("allocation failed flags %Lu\n", data); - } - if (ret) { BUG(); - goto out; } + clear_extent_dirty(&root->fs_info->free_space_cache, + ins->objectid, ins->objectid + ins->offset - 1, + GFP_NOFS); + return 0; +} + +int btrfs_reserve_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 num_bytes, u64 min_alloc_size, + u64 empty_size, u64 hint_byte, + u64 search_end, struct btrfs_key *ins, + u64 data) +{ + int ret; + maybe_lock_mutex(root); + ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, + empty_size, hint_byte, search_end, ins, + data); + maybe_unlock_mutex(root); + return ret; +} + +static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset, + struct btrfs_key *ins) +{ + int ret; + int pending_ret; + u64 super_used; + u64 root_used; + u64 num_bytes = ins->offset; + u32 sizes[2]; + struct btrfs_fs_info *info = root->fs_info; + struct btrfs_root *extent_root = info->extent_root; + struct btrfs_extent_item *extent_item; + struct btrfs_extent_ref *ref; + struct btrfs_path *path; + struct btrfs_key keys[2]; /* block accounting for super block */ spin_lock_irq(&info->delalloc_lock); @@ -1990,10 +2008,6 @@ again: root_used = btrfs_root_used(&root->root_item); btrfs_set_root_used(&root->root_item, root_used + num_bytes); - clear_extent_dirty(&root->fs_info->free_space_cache, - ins->objectid, ins->objectid + ins->offset - 1, - GFP_NOFS); - if (root == extent_root) { set_extent_bits(&root->fs_info->extent_ins, ins->objectid, ins->objectid + ins->offset - 1, @@ -2001,10 +2015,6 @@ again: goto update_block; } - WARN_ON(trans->alloc_exclude_nr); - trans->alloc_exclude_start = ins->objectid; - trans->alloc_exclude_nr = ins->offset; - memcpy(&keys[0], ins, sizeof(*ins)); keys[1].offset = hash_extent_ref(root_objectid, ref_generation, owner, owner_offset); @@ -2054,6 +2064,51 @@ update_block: BUG(); } out: + return ret; +} + +int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset, + struct btrfs_key *ins) +{ + int ret; + maybe_lock_mutex(root); + ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid, + ref_generation, owner, + owner_offset, ins); + maybe_unlock_mutex(root); + return ret; +} +/* + * finds a free extent and does all the dirty work required for allocation + * returns the key for the extent through ins, and a tree buffer for + * the first block of the 
extent through buf. + * + * returns 0 if everything worked, non-zero otherwise. + */ +int btrfs_alloc_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 num_bytes, u64 min_alloc_size, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset, + u64 empty_size, u64 hint_byte, + u64 search_end, struct btrfs_key *ins, u64 data) +{ + int ret; + + maybe_lock_mutex(root); + + ret = __btrfs_reserve_extent(trans, root, num_bytes, + min_alloc_size, empty_size, hint_byte, + search_end, ins, data); + BUG_ON(ret); + ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid, + ref_generation, owner, + owner_offset, ins); + BUG_ON(ret); + maybe_unlock_mutex(root); return ret; } @@ -2288,8 +2343,8 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->alloc_mutex); /* we've dropped the lock, double check */ - ret = drop_snap_lookup_refcount(root, bytenr, - blocksize, &refs); + ret = lookup_extent_ref(NULL, root, bytenr, blocksize, + &refs); BUG_ON(ret); if (refs != 1) { parent = path->nodes[*level]; @@ -2584,7 +2639,6 @@ out_unlock: kfree(ra); trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); if (trans) { - btrfs_add_ordered_inode(inode); btrfs_end_transaction(trans, BTRFS_I(inode)->root); mark_inode_dirty(inode); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 40a5f53cb04..3f82a6e9ca4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -793,6 +793,13 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, } EXPORT_SYMBOL(set_extent_dirty); +int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask); +} +EXPORT_SYMBOL(set_extent_ordered); + int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits, gfp_t mask) { @@ -812,8 +819,8 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return set_extent_bit(tree, start, end, - EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL, - mask); + EXTENT_DELALLOC | EXTENT_DIRTY, + 0, NULL, mask); } EXPORT_SYMBOL(set_extent_delalloc); @@ -825,6 +832,13 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, } EXPORT_SYMBOL(clear_extent_dirty); +int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask); +} +EXPORT_SYMBOL(clear_extent_ordered); + int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { @@ -1395,10 +1409,9 @@ static int end_bio_extent_writepage(struct bio *bio, if (--bvec >= bio->bi_io_vec) prefetchw(&bvec->bv_page->flags); - if (tree->ops && tree->ops->writepage_end_io_hook) { ret = tree->ops->writepage_end_io_hook(page, start, - end, state); + end, state, uptodate); if (ret) uptodate = 0; } @@ -1868,9 +1881,14 @@ static int __extent_read_full_page(struct extent_io_tree *tree, unlock_extent(tree, cur, end, GFP_NOFS); break; } - extent_offset = cur - em->start; + if (extent_map_end(em) <= cur) { +printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur); + } BUG_ON(extent_map_end(em) <= cur); + if (end < cur) { +printk("2bad mapping end %Lu cur %Lu\n", end, cur); + } BUG_ON(end < cur); iosize = min(extent_map_end(em) - cur, end - cur + 1); @@ -1976,6 +1994,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 last_byte = i_size_read(inode); u64 block_start; u64 iosize; 
+ u64 unlock_start; sector_t sector; struct extent_map *em; struct block_device *bdev; @@ -1988,7 +2007,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 nr_delalloc; u64 delalloc_end; - WARN_ON(!PageLocked(page)); page_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || @@ -2030,6 +2048,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, delalloc_start = delalloc_end + 1; } lock_extent(tree, start, page_end, GFP_NOFS); + unlock_start = start; end = page_end; if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { @@ -2038,6 +2057,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, if (last_byte <= start) { clear_extent_dirty(tree, start, page_end, GFP_NOFS); + unlock_extent(tree, start, page_end, GFP_NOFS); + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, + page_end, NULL, 1); + unlock_start = page_end + 1; goto done; } @@ -2047,6 +2071,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, while (cur <= end) { if (cur >= last_byte) { clear_extent_dirty(tree, cur, page_end, GFP_NOFS); + unlock_extent(tree, unlock_start, page_end, GFP_NOFS); + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, cur, + page_end, NULL, 1); + unlock_start = page_end + 1; break; } em = epd->get_extent(inode, page, page_offset, cur, @@ -2071,8 +2100,16 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, block_start == EXTENT_MAP_INLINE) { clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); + + unlock_extent(tree, unlock_start, cur + iosize -1, + GFP_NOFS); + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, cur, + cur + iosize - 1, + NULL, 1); cur = cur + iosize; page_offset += iosize; + unlock_start = cur; continue; } @@ -2119,7 +2156,8 @@ done: set_page_writeback(page); end_page_writeback(page); } - unlock_extent(tree, start, page_end, GFP_NOFS); + if (unlock_start <= page_end) + unlock_extent(tree, unlock_start, page_end, GFP_NOFS); unlock_page(page); return 0; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f1960dafaa1..2268a799589 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -13,6 +13,8 @@ #define EXTENT_DEFRAG (1 << 6) #define EXTENT_DEFRAG_DONE (1 << 7) #define EXTENT_BUFFER_FILLED (1 << 8) +#define EXTENT_ORDERED (1 << 9) +#define EXTENT_ORDERED_METADATA (1 << 10) #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) /* @@ -42,7 +44,7 @@ struct extent_io_ops { int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, - struct extent_state *state); + struct extent_state *state, int uptodate); int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits); int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end, @@ -131,6 +133,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, int filled); int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits, gfp_t mask); +int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + int bits, int wake, int delete, gfp_t mask); int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits, gfp_t mask); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 
end, @@ -141,8 +145,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); +int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask); +int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start, + u64 end, gfp_t mask); int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); +int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask); int find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, int bits); struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, @@ -209,6 +219,8 @@ void memset_extent_buffer(struct extent_buffer *eb, char c, unsigned long start, unsigned long len); int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, struct extent_buffer *eb); +int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end); +int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); int clear_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); int set_extent_buffer_dirty(struct extent_io_tree *tree, diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index f5a04eb9a2a..81123277c2b 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -206,10 +206,11 @@ int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *merge = NULL; struct rb_node *rb; + BUG_ON(spin_trylock(&tree->lock)); rb = tree_insert(&tree->map, em->start, &em->rb_node); if (rb) { - merge = rb_entry(rb, struct extent_map, rb_node); ret = -EEXIST; + free_extent_map(merge); goto out; } atomic_inc(&em->refs); @@ -268,6 +269,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, struct rb_node *next = NULL; u64 end = range_end(start, len); + BUG_ON(spin_trylock(&tree->lock)); em = tree->last; if (em && end > em->start && start < extent_map_end(em)) goto found; @@ -318,6 +320,7 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) { int ret = 0; + BUG_ON(spin_trylock(&tree->lock)); rb_erase(&em->rb_node, &tree->map); em->in_tree = 0; if (tree->last == em) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index f537eb43c2c..345caf8ff51 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -135,26 +135,37 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, } int btrfs_csum_one_bio(struct btrfs_root *root, - struct bio *bio, char **sums_ret) + struct bio *bio, struct btrfs_ordered_sum **sums_ret) { - u32 *sums; + struct btrfs_ordered_sum *sums; + struct btrfs_sector_sum *sector_sum; char *data; struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; - sums = kmalloc(bio->bi_vcnt * BTRFS_CRC32_SIZE, GFP_NOFS); + WARN_ON(bio->bi_vcnt <= 0); + sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); if (!sums) return -ENOMEM; - *sums_ret = (char *)sums; + *sums_ret = sums; + sector_sum = &sums->sums; + sums->file_offset = page_offset(bvec->bv_page); + sums->len = bio->bi_size; + INIT_LIST_HEAD(&sums->list); while(bio_index < bio->bi_vcnt) { data = kmap_atomic(bvec->bv_page, KM_USER0); - *sums = ~(u32)0; - *sums = btrfs_csum_data(root, data + bvec->bv_offset, - *sums, bvec->bv_len); + sector_sum->sum = ~(u32)0; + sector_sum->sum = btrfs_csum_data(root, + data + bvec->bv_offset, + sector_sum->sum, + bvec->bv_len); kunmap_atomic(data, KM_USER0); - btrfs_csum_final(*sums, (char 
*)sums);
-		sums++;
+		btrfs_csum_final(sector_sum->sum,
+				 (char *)&sector_sum->sum);
+		sector_sum->offset = page_offset(bvec->bv_page) +
+			bvec->bv_offset;
+		sector_sum++;
 		bio_index++;
 		bvec++;
 	}
@@ -163,7 +174,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root,
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, struct inode *inode,
-			   struct bio *bio, char *sums)
+			   struct btrfs_ordered_sum *sums)
 {
 	u64 objectid = inode->i_ino;
 	u64 offset;
@@ -171,17 +182,16 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 	struct btrfs_key file_key;
 	struct btrfs_key found_key;
 	u64 next_offset;
+	u64 total_bytes = 0;
 	int found_next;
 	struct btrfs_path *path;
 	struct btrfs_csum_item *item;
 	struct btrfs_csum_item *item_end;
 	struct extent_buffer *leaf = NULL;
 	u64 csum_offset;
-	u32 *sums32 = (u32 *)sums;
+	struct btrfs_sector_sum *sector_sum;
 	u32 nritems;
 	u32 ins_size;
-	int bio_index = 0;
-	struct bio_vec *bvec = bio->bi_io_vec;
 	char *eb_map;
 	char *eb_token;
 	unsigned long map_len;
@@ -189,10 +199,11 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
+	sector_sum = &sums->sums;
 again:
 	next_offset = (u64)-1;
 	found_next = 0;
-	offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+	offset = sector_sum->offset;
 	file_key.objectid = objectid;
 	file_key.offset = offset;
 	btrfs_set_key_type(&file_key, BTRFS_CSUM_ITEM_KEY);
@@ -303,7 +314,7 @@ found:
 	item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
 				btrfs_item_size_nr(leaf, path->slots[0]));
 	eb_token = NULL;
-next_bvec:
+next_sector:
 	if (!eb_token ||
 	    (unsigned long)item + BTRFS_CRC32_SIZE >= map_start + map_len) {
@@ -321,21 +332,20 @@ next_bvec:
 	}
 	if (eb_token) {
 		memcpy(eb_token + ((unsigned long)item & (PAGE_CACHE_SIZE - 1)),
-		       sums32, BTRFS_CRC32_SIZE);
+		       &sector_sum->sum, BTRFS_CRC32_SIZE);
 	} else {
-		write_extent_buffer(leaf, sums32, (unsigned long)item,
-				    BTRFS_CRC32_SIZE);
+		write_extent_buffer(leaf, &sector_sum->sum,
+				    (unsigned long)item, BTRFS_CRC32_SIZE);
 	}
-	bio_index++;
-	bvec++;
-	sums32++;
-	if (bio_index < bio->bi_vcnt) {
+	total_bytes += root->sectorsize;
+	sector_sum++;
+	if (total_bytes < sums->len) {
 		item = (struct btrfs_csum_item *)((char *)item +
 						  BTRFS_CRC32_SIZE);
 		if (item < item_end && offset + PAGE_CACHE_SIZE ==
-		    page_offset(bvec->bv_page)) {
-			offset = page_offset(bvec->bv_page);
-			goto next_bvec;
+		    sector_sum->offset) {
+			offset = sector_sum->offset;
+			goto next_sector;
 		}
 	}
 	if (eb_token) {
@@ -343,7 +353,7 @@ next_bvec:
 		eb_token = NULL;
 	}
 	btrfs_mark_buffer_dirty(path->nodes[0]);
-	if (bio_index < bio->bi_vcnt) {
+	if (total_bytes < sums->len) {
 		btrfs_release_path(root, path);
 		goto again;
 	}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 8037792f878..12e765f7e0d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -34,7 +34,6 @@
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ordered-data.h"
 #include "ioctl.h"
 #include "print-tree.h"
 #include "compat.h"
@@ -273,7 +272,9 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		u64 mask = root->sectorsize - 1;
 		last_pos_in_file = (isize + mask) & ~mask;
 		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
-		if (last_pos_in_file < start_pos) {
+		if (hole_size > 0) {
+			btrfs_wait_ordered_range(inode, last_pos_in_file,
+						 last_pos_in_file + hole_size);
 			err = btrfs_drop_extents(trans, root, inode,
 						 last_pos_in_file,
 						 last_pos_in_file + hole_size,
@@ -303,19 +304,17 @@ static int noinline dirty_and_release_pages(struct 
btrfs_trans_handle *trans, inline_size > root->fs_info->max_inline || (inline_size & (root->sectorsize -1)) == 0 || inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) { - u64 last_end; - + /* check for reserved extents on each page, we don't want + * to reset the delalloc bit on things that already have + * extents reserved. + */ + set_extent_delalloc(io_tree, start_pos, + end_of_last_block, GFP_NOFS); for (i = 0; i < num_pages; i++) { struct page *p = pages[i]; SetPageUptodate(p); set_page_dirty(p); } - last_end = (u64)(pages[num_pages -1]->index) << - PAGE_CACHE_SHIFT; - last_end += PAGE_CACHE_SIZE - 1; - set_extent_delalloc(io_tree, start_pos, end_of_last_block, - GFP_NOFS); - btrfs_add_ordered_inode(inode); } else { u64 aligned_end; /* step one, delete the existing extents in this range */ @@ -350,10 +349,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) struct extent_map *split = NULL; struct extent_map *split2 = NULL; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map *tmp; u64 len = end - start + 1; + u64 next_start; int ret; int testend = 1; + WARN_ON(end < start); if (end == (u64)-1) { len = (u64)-1; testend = 0; @@ -370,6 +372,8 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) spin_unlock(&em_tree->lock); break; } + tmp = rb_entry(&em->rb_node, struct extent_map, rb_node); + next_start = tmp->start; remove_extent_mapping(em_tree, em); if (em->block_start < EXTENT_MAP_LAST_BYTE && @@ -778,37 +782,58 @@ static int prepare_pages(struct btrfs_root *root, struct file *file, struct inode *inode = fdentry(file)->d_inode; int err = 0; u64 start_pos; + u64 last_pos; start_pos = pos & ~((u64)root->sectorsize - 1); + last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; memset(pages, 0, num_pages * sizeof(struct page *)); - +again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { err = -ENOMEM; BUG_ON(1); } -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) - ClearPageDirty(pages[i]); -#else - cancel_dirty_page(pages[i], PAGE_CACHE_SIZE); -#endif wait_on_page_writeback(pages[i]); - set_page_extent_mapped(pages[i]); - WARN_ON(!PageLocked(pages[i])); } if (start_pos < inode->i_size) { - u64 last_pos; - last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; + struct btrfs_ordered_extent *ordered; lock_extent(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1); + if (ordered && + ordered->file_offset + ordered->len > start_pos && + ordered->file_offset < last_pos) { + btrfs_put_ordered_extent(ordered); + unlock_extent(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, GFP_NOFS); + for (i = 0; i < num_pages; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + btrfs_wait_ordered_range(inode, start_pos, + last_pos - start_pos); + goto again; + } + if (ordered) + btrfs_put_ordered_extent(ordered); + clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC, GFP_NOFS); unlock_extent(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, GFP_NOFS); } + for (i = 0; i < num_pages; i++) { +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) + ClearPageDirty(pages[i]); +#else + cancel_dirty_page(pages[i], PAGE_CACHE_SIZE); +#endif + set_page_extent_mapped(pages[i]); + WARN_ON(!PageLocked(pages[i])); + } return 0; } @@ -969,13 +994,11 @@ out_nolock: (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); } current->backing_dev_info = NULL; - 
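/*
 * Illustrative sketch (not from the patch itself; the function below is
 * hypothetical).  The hunks here drop btrfs_ordered_throttle() from the
 * tail of the write path and btrfs_del_ordered_inode() from
 * btrfs_release_file(): ordered tracking moves from whole inodes to
 * per-range btrfs_ordered_extent records, so a caller that must see its
 * data on disk now waits on just the byte range it touched, e.g.:
 */
static void example_wait_page_ordered(struct inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return;
	/* flushes the range, then sleeps until BTRFS_ORDERED_COMPLETE */
	btrfs_wait_ordered_extent(inode, ordered);
	btrfs_put_ordered_extent(ordered);
}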
btrfs_ordered_throttle(root, inode); return num_written ? num_written : err; } int btrfs_release_file(struct inode * inode, struct file * filp) { - btrfs_del_ordered_inode(inode, 0); if (filp->private_data) btrfs_ioctl_trans_end(filp); return 0; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d39433dfb2c..c5a62f0b959 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -43,6 +43,7 @@ #include "ioctl.h" #include "print-tree.h" #include "volumes.h" +#include "ordered-data.h" struct btrfs_iget_args { u64 ino; @@ -109,10 +110,11 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) u64 num_bytes; u64 cur_alloc_size; u64 blocksize = root->sectorsize; - u64 orig_start = start; u64 orig_num_bytes; struct btrfs_key ins; - int ret; + struct extent_map *em; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + int ret = 0; trans = btrfs_start_transaction(root, 1); BUG_ON(!trans); @@ -120,33 +122,44 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) num_bytes = (end - start + blocksize) & ~(blocksize - 1); num_bytes = max(blocksize, num_bytes); - ret = btrfs_drop_extents(trans, root, inode, - start, start + num_bytes, start, &alloc_hint); orig_num_bytes = num_bytes; if (alloc_hint == EXTENT_MAP_INLINE) goto out; BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1); while(num_bytes > 0) { cur_alloc_size = min(num_bytes, root->fs_info->max_extent); - ret = btrfs_alloc_extent(trans, root, cur_alloc_size, - root->sectorsize, - root->root_key.objectid, - trans->transid, - inode->i_ino, start, 0, - alloc_hint, (u64)-1, &ins, 1); + ret = btrfs_reserve_extent(trans, root, cur_alloc_size, + root->sectorsize, 0, 0, + (u64)-1, &ins, 1); if (ret) { WARN_ON(1); goto out; } + em = alloc_extent_map(GFP_NOFS); + em->start = start; + em->len = ins.offset; + em->block_start = ins.objectid; + em->bdev = root->fs_info->fs_devices->latest_bdev; + while(1) { + spin_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + spin_unlock(&em_tree->lock); + if (ret != -EEXIST) { + free_extent_map(em); + break; + } + btrfs_drop_extent_cache(inode, start, + start + ins.offset - 1); + } + cur_alloc_size = ins.offset; - ret = btrfs_insert_file_extent(trans, root, inode->i_ino, - start, ins.objectid, ins.offset, - ins.offset, 0); - inode->i_blocks += ins.offset >> 9; - btrfs_check_file(root, inode); + ret = btrfs_add_ordered_extent(inode, start, ins.objectid, + ins.offset); + BUG_ON(ret); if (num_bytes < cur_alloc_size) { printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes, cur_alloc_size); @@ -156,10 +169,6 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; } - btrfs_drop_extent_cache(inode, orig_start, - orig_start + orig_num_bytes - 1); - btrfs_add_ordered_inode(inode); - btrfs_update_inode(trans, root, inode); out: btrfs_end_transaction(trans, root); return ret; @@ -341,25 +350,15 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num) { struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_trans_handle *trans; int ret = 0; - char *sums = NULL; + struct btrfs_ordered_sum *sums; ret = btrfs_csum_one_bio(root, bio, &sums); BUG_ON(ret); - trans = btrfs_start_transaction(root, 1); - - btrfs_set_trans_block_group(trans, inode); - mutex_lock(&BTRFS_I(inode)->csum_mutex); - btrfs_csum_file_blocks(trans, root, inode, bio, sums); - 
mutex_unlock(&BTRFS_I(inode)->csum_mutex); - - ret = btrfs_end_transaction(trans, root); + ret = btrfs_add_ordered_sum(inode, sums); BUG_ON(ret); - kfree(sums); - return btrfs_map_bio(root, rw, bio, mirror_num, 1); } @@ -369,14 +368,10 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - if (!(rw & (1 << BIO_RW))) { - ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); - BUG_ON(ret); - goto mapit; - } + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + BUG_ON(ret); - if (btrfs_test_opt(root, NODATASUM) || - btrfs_test_flag(inode, NODATASUM)) { + if (!(rw & (1 << BIO_RW))) { goto mapit; } @@ -387,6 +382,96 @@ mapit: return btrfs_map_bio(root, rw, bio, mirror_num, 0); } +static int add_pending_csums(struct btrfs_trans_handle *trans, + struct inode *inode, u64 file_offset, + struct list_head *list) +{ + struct list_head *cur; + struct btrfs_ordered_sum *sum; + + btrfs_set_trans_block_group(trans, inode); + while(!list_empty(list)) { + cur = list->next; + sum = list_entry(cur, struct btrfs_ordered_sum, list); + mutex_lock(&BTRFS_I(inode)->csum_mutex); + btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root, + inode, sum); + mutex_unlock(&BTRFS_I(inode)->csum_mutex); + list_del(&sum->list); + kfree(sum); + } + return 0; +} + +int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, + struct extent_state *state, int uptodate) +{ + struct inode *inode = page->mapping->host; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + struct btrfs_ordered_extent *ordered_extent; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + u64 alloc_hint = 0; + struct list_head list; + struct btrfs_key ins; + int ret; + + ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); + if (!ret) { + return 0; + } + + trans = btrfs_start_transaction(root, 1); + + ordered_extent = btrfs_lookup_ordered_extent(inode, start); + BUG_ON(!ordered_extent); + + lock_extent(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + ordered_extent->len - 1, + GFP_NOFS); + + INIT_LIST_HEAD(&list); + + ins.objectid = ordered_extent->start; + ins.offset = ordered_extent->len; + ins.type = BTRFS_EXTENT_ITEM_KEY; + ret = btrfs_alloc_reserved_extent(trans, root, root->root_key.objectid, + trans->transid, inode->i_ino, + ordered_extent->file_offset, &ins); + BUG_ON(ret); + ret = btrfs_drop_extents(trans, root, inode, + ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len, + ordered_extent->file_offset, &alloc_hint); + BUG_ON(ret); + ret = btrfs_insert_file_extent(trans, root, inode->i_ino, + ordered_extent->file_offset, + ordered_extent->start, + ordered_extent->len, + ordered_extent->len, 0); + BUG_ON(ret); + btrfs_drop_extent_cache(inode, ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1); + inode->i_blocks += ordered_extent->len >> 9; + unlock_extent(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + ordered_extent->len - 1, + GFP_NOFS); + add_pending_csums(trans, inode, ordered_extent->file_offset, + &ordered_extent->list); + + btrfs_remove_ordered_extent(inode, ordered_extent); + /* once for us */ + btrfs_put_ordered_extent(ordered_extent); + /* once for the tree */ + btrfs_put_ordered_extent(ordered_extent); + + btrfs_update_inode(trans, root, inode); + btrfs_end_transaction(trans, root); + return 0; +} + int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) { int ret = 0; @@ 
-409,7 +494,8 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) if (ret == -ENOENT || ret == -EFBIG) ret = 0; csum = 0; - printk("no csum found for inode %lu start %Lu\n", inode->i_ino, start); + printk("no csum found for inode %lu start %Lu\n", inode->i_ino, + start); goto out; } read_extent_buffer(path->nodes[0], &csum, (unsigned long)item, @@ -833,7 +919,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) { struct btrfs_root *root; struct btrfs_trans_handle *trans; - struct inode *inode = dentry->d_inode; int ret; unsigned long nr = 0; @@ -849,14 +934,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) ret = btrfs_unlink_trans(trans, root, dir, dentry); nr = trans->blocks_used; - if (inode->i_nlink == 0) { - /* if the inode isn't linked anywhere, - * we don't need to worry about - * data=ordered - */ - btrfs_del_ordered_inode(inode, 1); - } - btrfs_end_transaction_throttle(trans, root); fail: btrfs_btree_balance_dirty(root, nr); @@ -931,6 +1008,7 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, int extent_type = -1; u64 mask = root->sectorsize - 1; + btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1); path = btrfs_alloc_path(); path->reada = -1; @@ -1117,34 +1195,6 @@ error: return ret; } -static int btrfs_cow_one_page(struct inode *inode, struct page *page, - size_t zero_start) -{ - char *kaddr; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; - u64 page_end = page_start + PAGE_CACHE_SIZE - 1; - int ret = 0; - - WARN_ON(!PageLocked(page)); - set_page_extent_mapped(page); - - lock_extent(io_tree, page_start, page_end, GFP_NOFS); - set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, - page_end, GFP_NOFS); - - if (zero_start != PAGE_CACHE_SIZE) { - kaddr = kmap(page); - memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); - flush_dcache_page(page); - kunmap(page); - } - set_page_dirty(page); - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - - return ret; -} - /* * taken from block_truncate_page, but does cow as it zeros out * any bytes left in the last page in the file. 
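/*
 * Illustrative sketch (not from the patch itself; the helper name is
 * hypothetical).  The btrfs_truncate_page() rework below inlines the
 * old btrfs_cow_one_page() helper: after waiting out any ordered extent
 * covering the page, the range is marked delalloc and the bytes past
 * the new EOF are zeroed in place.  The zeroing step on its own:
 */
static void example_zero_page_tail(struct page *page, unsigned offset)
{
	char *kaddr;

	if (offset == PAGE_CACHE_SIZE)
		return;			/* no bytes past EOF on this page */
	kaddr = kmap(page);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap(page);
	set_page_dirty(page);
}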
@@ -1153,12 +1203,16 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) { struct inode *inode = mapping->host; struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_ordered_extent *ordered; + char *kaddr; u32 blocksize = root->sectorsize; pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); struct page *page; int ret = 0; u64 page_start; + u64 page_end; if ((offset & (blocksize - 1)) == 0) goto out; @@ -1168,6 +1222,10 @@ again: page = grab_cache_page(mapping, index); if (!page) goto out; + + page_start = page_offset(page); + page_end = page_start + PAGE_CACHE_SIZE - 1; + if (!PageUptodate(page)) { ret = btrfs_readpage(NULL, page); lock_page(page); @@ -1181,10 +1239,32 @@ again: goto out; } } - - page_start = (u64)page->index << PAGE_CACHE_SHIFT; wait_on_page_writeback(page); - ret = btrfs_cow_one_page(inode, page, offset); + + lock_extent(io_tree, page_start, page_end, GFP_NOFS); + set_page_extent_mapped(page); + + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + btrfs_wait_ordered_extent(inode, ordered); + btrfs_put_ordered_extent(ordered); + goto again; + } + + set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, + page_end, GFP_NOFS); + ret = 0; + if (offset != PAGE_CACHE_SIZE) { + kaddr = kmap(page); + memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); + flush_dcache_page(page); + kunmap(page); + } + set_page_dirty(page); + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); unlock_page(page); page_cache_release(page); @@ -1222,8 +1302,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) btrfs_truncate_page(inode->i_mapping, inode->i_size); - lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); hole_size = block_end - hole_start; + btrfs_wait_ordered_range(inode, hole_start, hole_size); + lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); @@ -1258,6 +1339,7 @@ void btrfs_delete_inode(struct inode *inode) unsigned long nr; int ret; + btrfs_wait_ordered_range(inode, 0, (u64)-1); truncate_inode_pages(&inode->i_data, 0); if (is_bad_inode(inode)) { goto no_delete; @@ -1403,7 +1485,6 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); - atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); return 0; } @@ -1705,7 +1786,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); - atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->root = root; @@ -1930,7 +2010,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; - atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } dir->i_sb->s_dirt = 1; @@ -2066,64 +2145,18 @@ out_unlock: static int merge_extent_mapping(struct extent_map_tree *em_tree, struct extent_map *existing, - struct extent_map *em) + struct extent_map *em, + u64 map_start, u64 map_len) { u64 
start_diff; - u64 new_end; - int ret = 0; - int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE; - - if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE) - goto invalid; - - if (!real_blocks && em->block_start != existing->block_start) - goto invalid; - - new_end = max(existing->start + existing->len, em->start + em->len); - - if (existing->start >= em->start) { - if (em->start + em->len < existing->start) - goto invalid; - start_diff = existing->start - em->start; - if (real_blocks && em->block_start + start_diff != - existing->block_start) - goto invalid; - - em->len = new_end - em->start; - - remove_extent_mapping(em_tree, existing); - /* free for the tree */ - free_extent_map(existing); - ret = add_extent_mapping(em_tree, em); - - } else if (em->start > existing->start) { - - if (existing->start + existing->len < em->start) - goto invalid; - - start_diff = em->start - existing->start; - if (real_blocks && existing->block_start + start_diff != - em->block_start) - goto invalid; - - remove_extent_mapping(em_tree, existing); - em->block_start = existing->block_start; - em->start = existing->start; - em->len = new_end - existing->start; - free_extent_map(existing); - - ret = add_extent_mapping(em_tree, em); - } else { - goto invalid; - } - return ret; - -invalid: - printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n", - existing->start, existing->len, existing->block_start, - em->start, em->len, em->block_start); - return -EIO; + BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); + start_diff = map_start - em->start; + em->start = map_start; + em->len = map_len; + if (em->block_start < EXTENT_MAP_LAST_BYTE) + em->block_start += start_diff; + return add_extent_mapping(em_tree, em); } struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, @@ -2170,10 +2203,9 @@ again: err = -ENOMEM; goto out; } - + em->bdev = root->fs_info->fs_devices->latest_bdev; em->start = EXTENT_MAP_HOLE; em->len = (u64)-1; - em->bdev = root->fs_info->fs_devices->latest_bdev; ret = btrfs_lookup_file_extent(trans, root, path, objectid, start, trans != NULL); if (ret < 0) { @@ -2314,6 +2346,9 @@ insert: */ if (ret == -EEXIST) { struct extent_map *existing; + + ret = 0; + existing = lookup_extent_mapping(em_tree, start, len); if (existing && (existing->start > start || existing->start + existing->len <= start)) { @@ -2325,7 +2360,8 @@ insert: em->len); if (existing) { err = merge_extent_mapping(em_tree, existing, - em); + em, start, + root->sectorsize); free_extent_map(existing); if (err) { free_extent_map(em); @@ -2341,6 +2377,7 @@ insert: } else { free_extent_map(em); em = existing; + err = 0; } } spin_unlock(&em_tree->lock); @@ -2348,8 +2385,9 @@ out: btrfs_free_path(path); if (trans) { ret = btrfs_end_transaction(trans, root); - if (!err) + if (!err) { err = ret; + } } if (err) { free_extent_map(em); @@ -2474,8 +2512,7 @@ btrfs_readpages(struct file *file, struct address_space *mapping, return extent_readpages(tree, mapping, pages, nr_pages, btrfs_get_extent); } - -static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) +static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) { struct extent_io_tree *tree; struct extent_map_tree *map; @@ -2493,15 +2530,54 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) return ret; } +static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) +{ + struct btrfs_ordered_extent *ordered; + + ordered = btrfs_lookup_ordered_extent(page->mapping->host, + page_offset(page)); + if 
(ordered) { + btrfs_put_ordered_extent(ordered); + return 0; + } + return __btrfs_releasepage(page, gfp_flags); +} + static void btrfs_invalidatepage(struct page *page, unsigned long offset) { struct extent_io_tree *tree; + struct btrfs_ordered_extent *ordered; + u64 page_start = page_offset(page); + u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + wait_on_page_writeback(page); tree = &BTRFS_I(page->mapping->host)->io_tree; - extent_invalidatepage(tree, page, offset); - btrfs_releasepage(page, GFP_NOFS); + if (offset) { + btrfs_releasepage(page, GFP_NOFS); + return; + } + + lock_extent(tree, page_start, page_end, GFP_NOFS); + ordered = btrfs_lookup_ordered_extent(page->mapping->host, + page_offset(page)); + if (ordered) { + clear_extent_bit(tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_LOCKED, 1, 0, GFP_NOFS); + btrfs_writepage_end_io_hook(page, page_start, + page_end, NULL, 1); + btrfs_put_ordered_extent(ordered); + lock_extent(tree, page_start, page_end, GFP_NOFS); + } + clear_extent_bit(tree, page_start, page_end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_ORDERED, + 1, 1, GFP_NOFS); + __btrfs_releasepage(page, GFP_NOFS); + if (PagePrivate(page)) { - invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE); + invalidate_extent_lru(tree, page_offset(page), + PAGE_CACHE_SIZE); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); @@ -2527,35 +2603,63 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page) { struct inode *inode = fdentry(vma->vm_file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; - unsigned long end; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_ordered_extent *ordered; + char *kaddr; + unsigned long zero_start; loff_t size; int ret; u64 page_start; + u64 page_end; ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0); if (ret) goto out; ret = -EINVAL; - +again: lock_page(page); - wait_on_page_writeback(page); size = i_size_read(inode); - page_start = (u64)page->index << PAGE_CACHE_SHIFT; + page_start = page_offset(page); + page_end = page_start + PAGE_CACHE_SIZE - 1; if ((page->mapping != inode->i_mapping) || - (page_start > size)) { + (page_start >= size)) { /* page got truncated out from underneath us */ goto out_unlock; } + wait_on_page_writeback(page); + + lock_extent(io_tree, page_start, page_end, GFP_NOFS); + set_page_extent_mapped(page); + + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_page(page); + btrfs_wait_ordered_extent(inode, ordered); + btrfs_put_ordered_extent(ordered); + goto again; + } + + set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, + page_end, GFP_NOFS); + ret = 0; /* page is wholly or partially inside EOF */ if (page_start + PAGE_CACHE_SIZE > size) - end = size & ~PAGE_CACHE_MASK; + zero_start = size & ~PAGE_CACHE_MASK; else - end = PAGE_CACHE_SIZE; + zero_start = PAGE_CACHE_SIZE; - ret = btrfs_cow_one_page(inode, page, end); + if (zero_start != PAGE_CACHE_SIZE) { + kaddr = kmap(page); + memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); + flush_dcache_page(page); + kunmap(page); + } + set_page_dirty(page); + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); out_unlock: unlock_page(page); @@ -2662,15 +2766,28 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) if (!ei) return NULL; ei->last_trans = 0; - ei->ordered_trans = 0; + btrfs_ordered_inode_tree_init(&ei->ordered_tree); return 
&ei->vfs_inode; } void btrfs_destroy_inode(struct inode *inode) { + struct btrfs_ordered_extent *ordered; WARN_ON(!list_empty(&inode->i_dentry)); WARN_ON(inode->i_data.nrpages); + while(1) { + ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); + if (!ordered) + break; + else { + printk("found ordered extent %Lu %Lu\n", + ordered->file_offset, ordered->len); + btrfs_remove_ordered_extent(inode, ordered); + btrfs_put_ordered_extent(ordered); + btrfs_put_ordered_extent(ordered); + } + } btrfs_drop_extent_cache(inode, 0, (u64)-1); kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } @@ -2869,7 +2986,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; - atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } dir->i_sb->s_dirt = 1; @@ -2921,6 +3037,20 @@ out_fail: return err; } +static int btrfs_set_page_dirty(struct page *page) +{ + struct inode *inode = page->mapping->host; + u64 page_start = page_offset(page); + u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + + if (!test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_DELALLOC, 0)) { +printk("inode %lu page %Lu not delalloc\n", inode->i_ino, page_offset(page)); +WARN_ON(1); + } + return __set_page_dirty_nobuffers(page); +} + static int btrfs_permission(struct inode *inode, int mask, struct nameidata *nd) { @@ -2967,6 +3097,7 @@ static struct extent_io_ops btrfs_extent_io_ops = { .merge_bio_hook = btrfs_merge_bio_hook, .readpage_io_hook = btrfs_readpage_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, + .writepage_end_io_hook = btrfs_writepage_end_io_hook, .readpage_io_failed_hook = btrfs_io_failed_hook, .set_bit_hook = btrfs_set_bit_hook, .clear_bit_hook = btrfs_clear_bit_hook, @@ -2982,7 +3113,7 @@ static struct address_space_operations btrfs_aops = { .direct_IO = btrfs_direct_IO, .invalidatepage = btrfs_invalidatepage, .releasepage = btrfs_releasepage, - .set_page_dirty = __set_page_dirty_nobuffers, + .set_page_dirty = btrfs_set_page_dirty, }; static struct address_space_operations btrfs_symlink_aops = { diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 254da822566..6513270f054 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -22,48 +22,30 @@ #include "ctree.h" #include "transaction.h" #include "btrfs_inode.h" +#include "extent_io.h" -struct tree_entry { - u64 root_objectid; - u64 objectid; - struct inode *inode; - struct rb_node rb_node; -}; -/* - * returns > 0 if entry passed (root, objectid) is > entry, - * < 0 if (root, objectid) < entry and zero if they are equal - */ -static int comp_entry(struct tree_entry *entry, u64 root_objectid, - u64 objectid) +static u64 entry_end(struct btrfs_ordered_extent *entry) { - if (root_objectid < entry->root_objectid) - return -1; - if (root_objectid > entry->root_objectid) - return 1; - if (objectid < entry->objectid) - return -1; - if (objectid > entry->objectid) - return 1; - return 0; + if (entry->file_offset + entry->len < entry->file_offset) + return (u64)-1; + return entry->file_offset + entry->len; } -static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid, - u64 objectid, struct rb_node *node) +static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, + struct rb_node *node) { struct rb_node ** p = &root->rb_node; struct rb_node * parent = NULL; - struct tree_entry *entry; - int comp; + struct 
btrfs_ordered_extent *entry; while(*p) { parent = *p; - entry = rb_entry(parent, struct tree_entry, rb_node); + entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); - comp = comp_entry(entry, root_objectid, objectid); - if (comp < 0) + if (file_offset < entry->file_offset) p = &(*p)->rb_left; - else if (comp > 0) + else if (file_offset >= entry_end(entry)) p = &(*p)->rb_right; else return parent; @@ -74,24 +56,23 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid, return NULL; } -static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid, - u64 objectid, struct rb_node **prev_ret) +static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, + struct rb_node **prev_ret) { struct rb_node * n = root->rb_node; struct rb_node *prev = NULL; - struct tree_entry *entry; - struct tree_entry *prev_entry = NULL; - int comp; + struct rb_node *test; + struct btrfs_ordered_extent *entry; + struct btrfs_ordered_extent *prev_entry = NULL; while(n) { - entry = rb_entry(n, struct tree_entry, rb_node); + entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); prev = n; prev_entry = entry; - comp = comp_entry(entry, root_objectid, objectid); - if (comp < 0) + if (file_offset < entry->file_offset) n = n->rb_left; - else if (comp > 0) + else if (file_offset >= entry_end(entry)) n = n->rb_right; else return n; @@ -99,195 +80,329 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid, if (!prev_ret) return NULL; - while(prev && comp_entry(prev_entry, root_objectid, objectid) >= 0) { - prev = rb_next(prev); - prev_entry = rb_entry(prev, struct tree_entry, rb_node); + while(prev && file_offset >= entry_end(prev_entry)) { + test = rb_next(prev); + if (!test) + break; + prev_entry = rb_entry(test, struct btrfs_ordered_extent, + rb_node); + if (file_offset < entry_end(prev_entry)) + break; + + prev = test; + } + if (prev) + prev_entry = rb_entry(prev, struct btrfs_ordered_extent, + rb_node); + while(prev && file_offset < entry_end(prev_entry)) { + test = rb_prev(prev); + if (!test) + break; + prev_entry = rb_entry(test, struct btrfs_ordered_extent, + rb_node); + prev = test; } *prev_ret = prev; return NULL; } -static inline struct rb_node *tree_search(struct rb_root *root, - u64 root_objectid, u64 objectid) +static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) +{ + if (file_offset < entry->file_offset || + entry->file_offset + entry->len <= file_offset) + return 0; + return 1; +} + +static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, + u64 file_offset) { + struct rb_root *root = &tree->tree; struct rb_node *prev; struct rb_node *ret; - ret = __tree_search(root, root_objectid, objectid, &prev); + struct btrfs_ordered_extent *entry; + + if (tree->last) { + entry = rb_entry(tree->last, struct btrfs_ordered_extent, + rb_node); + if (offset_in_entry(entry, file_offset)) + return tree->last; + } + ret = __tree_search(root, file_offset, &prev); if (!ret) - return prev; + ret = prev; + if (ret) + tree->last = ret; return ret; } -int btrfs_add_ordered_inode(struct inode *inode) +int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + u64 start, u64 len) { - struct btrfs_root *root = BTRFS_I(inode)->root; - u64 root_objectid = root->root_key.objectid; - u64 transid = root->fs_info->running_transaction->transid; - struct tree_entry *entry; - struct rb_node *node; struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; + struct btrfs_ordered_extent *entry; 
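/*
 * Illustrative sketch (not from the patch itself; the example_ prefix
 * marks restated code).  The rb-tree is now keyed by file byte ranges
 * rather than (root objectid, inode number) pairs, and the two small
 * predicates introduced above drive every lookup:
 */
static u64 example_entry_end(struct btrfs_ordered_extent *entry)
{
	/* clamp instead of wrapping when file_offset + len overflows */
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

static int example_offset_in_entry(struct btrfs_ordered_extent *entry,
				   u64 file_offset)
{
	/* half-open containment test: [file_offset, file_offset + len) */
	return file_offset >= entry->file_offset &&
	       file_offset < example_entry_end(entry);
}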
- if (transid <= BTRFS_I(inode)->ordered_trans) - return 0; - - tree = &root->fs_info->running_transaction->ordered_inode_tree; - - read_lock(&tree->lock); - node = __tree_search(&tree->tree, root_objectid, inode->i_ino, NULL); - read_unlock(&tree->lock); - if (node) { - return 0; - } - - entry = kmalloc(sizeof(*entry), GFP_NOFS); + tree = &BTRFS_I(inode)->ordered_tree; + entry = kzalloc(sizeof(*entry), GFP_NOFS); if (!entry) return -ENOMEM; - write_lock(&tree->lock); - entry->objectid = inode->i_ino; - entry->root_objectid = root_objectid; + mutex_lock(&tree->mutex); + entry->file_offset = file_offset; + entry->start = start; + entry->len = len; entry->inode = inode; + /* one ref for the tree */ + atomic_set(&entry->refs, 1); + init_waitqueue_head(&entry->wait); + INIT_LIST_HEAD(&entry->list); - node = tree_insert(&tree->tree, root_objectid, - inode->i_ino, &entry->rb_node); - - BTRFS_I(inode)->ordered_trans = transid; - if (!node) - igrab(inode); - - write_unlock(&tree->lock); + node = tree_insert(&tree->tree, file_offset, + &entry->rb_node); + if (node) { + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + atomic_inc(&entry->refs); + } + set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset, + entry_end(entry) - 1, GFP_NOFS); - if (node) - kfree(entry); + set_bit(BTRFS_ORDERED_START, &entry->flags); + mutex_unlock(&tree->mutex); + BUG_ON(node); return 0; } -int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid, - struct inode **inode) +int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum) { - struct tree_entry *entry; + struct btrfs_ordered_inode_tree *tree; struct rb_node *node; + struct btrfs_ordered_extent *entry; - write_lock(&tree->lock); - node = tree_search(&tree->tree, *root_objectid, *objectid); + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + node = tree_search(tree, sum->file_offset); if (!node) { - write_unlock(&tree->lock); - return 0; +search_fail: +printk("add ordered sum failed to find a node for inode %lu offset %Lu\n", inode->i_ino, sum->file_offset); + node = rb_first(&tree->tree); + while(node) { + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + printk("entry %Lu %Lu %Lu\n", entry->file_offset, entry->file_offset + entry->len, entry->start); + node = rb_next(node); + } + BUG(); } - entry = rb_entry(node, struct tree_entry, rb_node); + BUG_ON(!node); - while(comp_entry(entry, *root_objectid, *objectid) >= 0) { - node = rb_next(node); - if (!node) - break; - entry = rb_entry(node, struct tree_entry, rb_node); - } - if (!node) { - write_unlock(&tree->lock); - return 0; + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (!offset_in_entry(entry, sum->file_offset)) { + goto search_fail; } - *root_objectid = entry->root_objectid; - *inode = entry->inode; - atomic_inc(&entry->inode->i_count); - *objectid = entry->objectid; - write_unlock(&tree->lock); - return 1; + list_add_tail(&sum->list, &entry->list); + mutex_unlock(&tree->mutex); + return 0; } -int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid, - struct inode **inode) +int btrfs_dec_test_ordered_pending(struct inode *inode, + u64 file_offset, u64 io_size) { - struct tree_entry *entry; + struct btrfs_ordered_inode_tree *tree; struct rb_node *node; - - write_lock(&tree->lock); - node = tree_search(&tree->tree, *root_objectid, *objectid); + struct btrfs_ordered_extent *entry; + struct extent_io_tree *io_tree = 
&BTRFS_I(inode)->io_tree; + int ret; + + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1, + GFP_NOFS); + node = tree_search(tree, file_offset); if (!node) { - write_unlock(&tree->lock); - return 0; + ret = 1; + goto out; } - entry = rb_entry(node, struct tree_entry, rb_node); - while(comp_entry(entry, *root_objectid, *objectid) >= 0) { - node = rb_next(node); - if (!node) - break; - entry = rb_entry(node, struct tree_entry, rb_node); + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (!offset_in_entry(entry, file_offset)) { + ret = 1; + goto out; } - if (!node) { - write_unlock(&tree->lock); - return 0; + + ret = test_range_bit(io_tree, entry->file_offset, + entry->file_offset + entry->len - 1, + EXTENT_ORDERED, 0); + if (!test_bit(BTRFS_ORDERED_START, &entry->flags)) { +printk("inode %lu not ready yet for extent %Lu %Lu\n", inode->i_ino, entry->file_offset, entry_end(entry)); } + if (ret == 0) + ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); +out: + mutex_unlock(&tree->mutex); + return ret == 0; +} - *root_objectid = entry->root_objectid; - *objectid = entry->objectid; - *inode = entry->inode; - atomic_inc(&entry->inode->i_count); - rb_erase(node, &tree->tree); - write_unlock(&tree->lock); - kfree(entry); - return 1; +int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) +{ + if (atomic_dec_and_test(&entry->refs)) + kfree(entry); + return 0; } -static void __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, - struct inode *inode, - u64 root_objectid, u64 objectid) +int btrfs_remove_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry) { - struct tree_entry *entry; + struct btrfs_ordered_inode_tree *tree; struct rb_node *node; - struct rb_node *prev; - write_lock(&tree->lock); - node = __tree_search(&tree->tree, root_objectid, objectid, &prev); - if (!node) { - write_unlock(&tree->lock); - return; - } + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + node = &entry->rb_node; rb_erase(node, &tree->tree); - BTRFS_I(inode)->ordered_trans = 0; - write_unlock(&tree->lock); - atomic_dec(&inode->i_count); - entry = rb_entry(node, struct tree_entry, rb_node); - kfree(entry); - return; + tree->last = NULL; + set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); + mutex_unlock(&tree->mutex); + wake_up(&entry->wait); + return 0; } -void btrfs_del_ordered_inode(struct inode *inode, int force) +void btrfs_wait_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry) { - struct btrfs_root *root = BTRFS_I(inode)->root; - u64 root_objectid = root->root_key.objectid; + u64 start = entry->file_offset; + u64 end = start + entry->len - 1; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); +#else + do_sync_mapping_range(inode->i_mapping, start, end, + SYNC_FILE_RANGE_WRITE); +#endif + wait_event(entry->wait, + test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); +} - if (!BTRFS_I(inode)->ordered_trans) { - return; - } +static void btrfs_start_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry, int wait) +{ + u64 start = entry->file_offset; + u64 end = start + entry->len - 1; - if (!force && (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) || - mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) - return; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); +#else + 
do_sync_mapping_range(inode->i_mapping, start, end, + SYNC_FILE_RANGE_WRITE); +#endif + if (wait) + wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, + &entry->flags)); +} - spin_lock(&root->fs_info->new_trans_lock); - if (root->fs_info->running_transaction) { - struct btrfs_ordered_inode_tree *tree; - tree = &root->fs_info->running_transaction->ordered_inode_tree; - __btrfs_del_ordered_inode(tree, inode, root_objectid, - inode->i_ino); +void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) +{ + u64 end; + struct btrfs_ordered_extent *ordered; + int found; + int should_wait = 0; + +again: + if (start + len < start) + end = (u64)-1; + else + end = start + len - 1; + found = 0; + while(1) { + ordered = btrfs_lookup_first_ordered_extent(inode, end); + if (!ordered) { + break; + } + if (ordered->file_offset >= start + len) { + btrfs_put_ordered_extent(ordered); + break; + } + if (ordered->file_offset + ordered->len < start) { + btrfs_put_ordered_extent(ordered); + break; + } + btrfs_start_ordered_extent(inode, ordered, should_wait); + found++; + end = ordered->file_offset; + btrfs_put_ordered_extent(ordered); + if (end == 0) + break; + end--; + } + if (should_wait && found) { + should_wait = 0; + goto again; } - spin_unlock(&root->fs_info->new_trans_lock); } -int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode) +int btrfs_add_ordered_pending(struct inode *inode, + struct btrfs_ordered_extent *ordered, + u64 start, u64 len) { - struct btrfs_transaction *cur = root->fs_info->running_transaction; - while(cur == root->fs_info->running_transaction && - atomic_read(&BTRFS_I(inode)->ordered_writeback)) { -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) - congestion_wait(WRITE, HZ/20); -#else - blk_congestion_wait(WRITE, HZ/20); -#endif - } + WARN_ON(1); return 0; +#if 0 + int ret; + struct btrfs_ordered_inode_tree *tree; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) { + ret = -EAGAIN; + goto out; + } + set_extent_ordered(io_tree, start, start + len - 1, GFP_NOFS); + ret = 0; +out: + mutex_unlock(&tree->mutex); + return ret; +#endif +} + +struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, + u64 file_offset) +{ + struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; + struct btrfs_ordered_extent *entry = NULL; + + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + node = tree_search(tree, file_offset); + if (!node) + goto out; + + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (!offset_in_entry(entry, file_offset)) + entry = NULL; + if (entry) + atomic_inc(&entry->refs); +out: + mutex_unlock(&tree->mutex); + return entry; +} + +struct btrfs_ordered_extent * +btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset) +{ + struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; + struct btrfs_ordered_extent *entry = NULL; + + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + node = tree_search(tree, file_offset); + if (!node) + goto out; + + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + atomic_inc(&entry->refs); +out: + mutex_unlock(&tree->mutex); + return entry; } diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 4fa78736423..33292c5fe90 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -20,24 +20,73 @@ #define __BTRFS_ORDERED_DATA__ struct 
btrfs_ordered_inode_tree { - rwlock_t lock; + struct mutex mutex; struct rb_root tree; + struct rb_node *last; }; +struct btrfs_sector_sum { + u64 offset; + u32 sum; +}; + +struct btrfs_ordered_sum { + u64 file_offset; + u64 len; + struct list_head list; + struct btrfs_sector_sum sums; +}; + +/* bits for the flags field */ +#define BTRFS_ORDERED_IO_DONE 0 /* set when all the pages are written */ +#define BTRFS_ORDERED_COMPLETE 1 /* set when removed from the tree */ +#define BTRFS_ORDERED_START 2 /* set when tree setup */ + +struct btrfs_ordered_extent { + u64 file_offset; + u64 start; + u64 len; + unsigned long flags; + atomic_t refs; + struct list_head list; + struct inode *inode; + wait_queue_head_t wait; + struct rb_node rb_node; +}; + + +static inline int btrfs_ordered_sum_size(struct btrfs_root *root, u64 bytes) +{ + unsigned long num_sectors = (bytes + root->sectorsize - 1) / + root->sectorsize; + return sizeof(struct btrfs_ordered_sum) + + num_sectors * sizeof(struct btrfs_sector_sum); +} + static inline void btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t) { - rwlock_init(&t->lock); + mutex_init(&t->mutex); t->tree.rb_node = NULL; + t->last = NULL; } -int btrfs_add_ordered_inode(struct inode *inode); -int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid, - struct inode **inode); -int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid, - struct inode **inode); -void btrfs_del_ordered_inode(struct inode *inode, int force); -int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode); +int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); +int btrfs_remove_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry); +int btrfs_dec_test_ordered_pending(struct inode *inode, + u64 file_offset, u64 io_size); +int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + u64 start, u64 len); +int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum); +struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, + u64 file_offset); +void btrfs_wait_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry); +void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); +struct btrfs_ordered_extent * +btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); +int btrfs_add_ordered_pending(struct inode *inode, + struct btrfs_ordered_extent *ordered, + u64 start, u64 len); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index a8a3cb03de5..86a5acc19ce 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -67,7 +67,6 @@ static noinline int join_transaction(struct btrfs_root *root) cur_trans->start_time = get_seconds(); INIT_LIST_HEAD(&cur_trans->pending_snapshots); list_add_tail(&cur_trans->list, &root->fs_info->trans_list); - btrfs_ordered_inode_tree_init(&cur_trans->ordered_inode_tree); extent_io_tree_init(&cur_trans->dirty_pages, root->fs_info->btree_inode->i_mapping, GFP_NOFS); @@ -158,10 +157,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, wake_up(&cur_trans->writer_wait); if (cur_trans->in_commit && throttle) { - int ret; + DEFINE_WAIT(wait); mutex_unlock(&root->fs_info->trans_mutex); - ret = wait_for_commit(root, cur_trans); - BUG_ON(ret); + prepare_to_wait(&root->fs_info->transaction_throttle, &wait, + TASK_UNINTERRUPTIBLE); + schedule(); + 
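/*
 * Illustrative sketch (not from the patch itself; the helper is
 * hypothetical).  A throttled transaction holder now parks on
 * fs_info->transaction_throttle instead of waiting for the whole
 * commit; btrfs_commit_transaction() issues the matching wake_up()
 * once the pinned extents have been copied (see the hunk further
 * below).  The generic waitqueue pairing used at this step:
 */
static void example_throttle_until_woken(wait_queue_head_t *wq)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
	schedule();	/* resumed by wake_up(wq) from the committer */
	finish_wait(wq, &wait);
}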
finish_wait(&root->fs_info->transaction_throttle, &wait); mutex_lock(&root->fs_info->trans_mutex); } @@ -486,58 +487,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, return ret; } -int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - struct btrfs_transaction *cur_trans = trans->transaction; - struct inode *inode; - u64 root_objectid = 0; - u64 objectid = 0; - int ret; - - atomic_inc(&root->fs_info->throttles); - while(1) { - ret = btrfs_find_first_ordered_inode( - &cur_trans->ordered_inode_tree, - &root_objectid, &objectid, &inode); - if (!ret) - break; - - mutex_unlock(&root->fs_info->trans_mutex); - - if (S_ISREG(inode->i_mode)) { - atomic_inc(&BTRFS_I(inode)->ordered_writeback); - filemap_fdatawrite(inode->i_mapping); - atomic_dec(&BTRFS_I(inode)->ordered_writeback); - } - iput(inode); - - mutex_lock(&root->fs_info->trans_mutex); - } - while(1) { - root_objectid = 0; - objectid = 0; - ret = btrfs_find_del_first_ordered_inode( - &cur_trans->ordered_inode_tree, - &root_objectid, &objectid, &inode); - if (!ret) - break; - mutex_unlock(&root->fs_info->trans_mutex); - - if (S_ISREG(inode->i_mode)) { - atomic_inc(&BTRFS_I(inode)->ordered_writeback); - filemap_write_and_wait(inode->i_mapping); - atomic_dec(&BTRFS_I(inode)->ordered_writeback); - } - atomic_dec(&inode->i_count); - iput(inode); - - mutex_lock(&root->fs_info->trans_mutex); - } - atomic_dec(&root->fs_info->throttles); - return 0; -} - static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_pending_snapshot *pending) @@ -666,6 +615,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, extent_io_tree_init(pinned_copy, root->fs_info->btree_inode->i_mapping, GFP_NOFS); +printk("commit trans %Lu\n", trans->transid); trans->transaction->in_commit = 1; cur_trans = trans->transaction; if (cur_trans->list.prev != &root->fs_info->trans_list) { @@ -699,8 +649,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); - ret = btrfs_write_ordered_inodes(trans, root); - } while (cur_trans->num_writers > 1 || (cur_trans->num_joined != joined)); @@ -736,6 +684,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_copy_pinned(root, pinned_copy); + wake_up(&root->fs_info->transaction_throttle); + mutex_unlock(&root->fs_info->trans_mutex); ret = btrfs_write_and_wait_transaction(trans, root); BUG_ON(ret); @@ -758,6 +708,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots); mutex_unlock(&root->fs_info->trans_mutex); +printk("done commit trans %Lu\n", trans->transid); kmem_cache_free(btrfs_trans_handle_cachep, trans); if (root->fs_info->closing) { diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 9ccd5a5b170..910350cd4cf 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -19,7 +19,6 @@ #ifndef __BTRFS_TRANSACTION__ #define __BTRFS_TRANSACTION__ #include "btrfs_inode.h" -#include "ordered-data.h" struct btrfs_transaction { u64 transid; @@ -31,7 +30,6 @@ struct btrfs_transaction { struct list_head list; struct extent_io_tree dirty_pages; unsigned long start_time; - struct btrfs_ordered_inode_tree ordered_inode_tree; wait_queue_head_t writer_wait; wait_queue_head_t commit_wait; struct list_head pending_snapshots; @@ -88,8 +86,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int 
cacheonly); int btrfs_clean_old_snapshots(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); -int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, - struct btrfs_root *root); int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root); #endif -- cgit v1.2.3-70-g09d2 From dbe674a99c8af088faa4c95eddaeb271a3140ab6 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 17 Jul 2008 12:54:05 -0400 Subject: Btrfs: Update on disk i_size only after pending ordered extents are done This changes the ordered data code to update i_size after the extent is on disk. An on disk i_size is maintained in the in-memory btrfs inode structures, and this is updated as extents finish. Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 9 +++++ fs/btrfs/file.c | 2 +- fs/btrfs/inode.c | 28 ++++++++++------ fs/btrfs/ordered-data.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/ordered-data.h | 2 ++ 5 files changed, 119 insertions(+), 11 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 8d03687510e..81c0444f37b 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -40,11 +40,20 @@ struct btrfs_inode { */ u64 last_trans; u64 delalloc_bytes; + u64 disk_i_size; u32 flags; }; + static inline struct btrfs_inode *BTRFS_I(struct inode *inode) { return container_of(inode, struct btrfs_inode, vfs_inode); } +static inline void btrfs_i_size_write(struct inode *inode, u64 size) +{ + inode->i_size = size; + BTRFS_I(inode)->disk_i_size = size; +} + + #endif diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 20928639d17..3e4e5c227c0 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -338,7 +338,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, btrfs_update_inode(trans, root, inode); } failed: - err = btrfs_end_transaction(trans, root); + err = btrfs_end_transaction_throttle(trans, root); out_unlock: unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS); return err; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 47a008c1930..baf46017d0d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -542,6 +542,7 @@ int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); + btrfs_ordered_update_i_size(inode, ordered_extent); btrfs_remove_ordered_extent(inode, ordered_extent); /* once for us */ btrfs_put_ordered_extent(ordered_extent); @@ -792,7 +793,7 @@ void btrfs_read_locked_inode(struct inode *inode) inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); inode->i_uid = btrfs_inode_uid(leaf, inode_item); inode->i_gid = btrfs_inode_gid(leaf, inode_item); - inode->i_size = btrfs_inode_size(leaf, inode_item); + btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); tspec = btrfs_inode_atime(inode_item); inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); @@ -860,7 +861,7 @@ static void fill_inode_item(struct extent_buffer *leaf, { btrfs_set_inode_uid(leaf, item, inode->i_uid); btrfs_set_inode_gid(leaf, item, inode->i_gid); - btrfs_set_inode_size(leaf, item, inode->i_size); + btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); btrfs_set_inode_mode(leaf, item, inode->i_mode); btrfs_set_inode_nlink(leaf, item, inode->i_nlink); @@ -982,7 +983,7 @@ static int btrfs_unlink_trans(struct btrfs_trans_handle *trans, err: btrfs_free_path(path); if (!ret) { - dir->i_size 
-= name_len * 2; + btrfs_i_size_write(dir, dir->i_size - name_len * 2); dir->i_mtime = dir->i_ctime = CURRENT_TIME; btrfs_update_inode(trans, root, dir); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) @@ -1044,7 +1045,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) /* now the directory is empty */ err = btrfs_unlink_trans(trans, root, dir, dentry); if (!err) { - inode->i_size = 0; + btrfs_i_size_write(inode, 0); } nr = trans->blocks_used; @@ -1089,7 +1090,6 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, int extent_type = -1; u64 mask = root->sectorsize - 1; - btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1); path = btrfs_alloc_path(); path->reada = -1; @@ -1427,7 +1427,7 @@ void btrfs_delete_inode(struct inode *inode) goto no_delete; } - inode->i_size = 0; + btrfs_i_size_write(inode, 0); trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); @@ -1561,6 +1561,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) inode->i_ino = args->ino; BTRFS_I(inode)->root = args->root; BTRFS_I(inode)->delalloc_bytes = 0; + BTRFS_I(inode)->disk_i_size = 0; extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS); @@ -1869,6 +1870,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; + BTRFS_I(inode)->disk_i_size = 0; BTRFS_I(inode)->root = root; if (mode & S_IFDIR) @@ -1964,7 +1966,8 @@ static int btrfs_add_link(struct btrfs_trans_handle *trans, dentry->d_parent->d_inode->i_ino); } parent_inode = dentry->d_parent->d_inode; - parent_inode->i_size += dentry->d_name.len * 2; + btrfs_i_size_write(parent_inode, parent_inode->i_size + + dentry->d_name.len * 2); parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, root, dentry->d_parent->d_inode); @@ -2092,6 +2095,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; + BTRFS_I(inode)->disk_i_size = 0; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } dir->i_sb->s_dirt = 1; @@ -2199,7 +2203,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) inode->i_fop = &btrfs_dir_file_operations; btrfs_set_trans_block_group(trans, inode); - inode->i_size = 0; + btrfs_i_size_write(inode, 0); err = btrfs_update_inode(trans, root, inode); if (err) goto out_fail; @@ -2756,6 +2760,7 @@ static void btrfs_truncate(struct inode *inode) int ret; struct btrfs_trans_handle *trans; unsigned long nr; + u64 mask = root->sectorsize - 1; if (!S_ISREG(inode->i_mode)) return; @@ -2766,6 +2771,8 @@ static void btrfs_truncate(struct inode *inode) trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); + btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); + btrfs_i_size_write(inode, inode->i_size); /* FIXME, add redo link to tree so we don't leak on crash */ ret = btrfs_truncate_in_trans(trans, root, inode, @@ -2821,7 +2828,7 @@ int btrfs_create_subvol_root(struct btrfs_root *new_root, ret = btrfs_insert_inode_ref(trans, new_root, "..", 2, new_dirid, new_dirid); inode->i_nlink = 1; - inode->i_size = 0; + btrfs_i_size_write(inode, 0); return btrfs_update_inode(trans, new_root, inode); 
} @@ -3069,6 +3076,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; + BTRFS_I(inode)->disk_i_size = 0; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } dir->i_sb->s_dirt = 1; @@ -3103,7 +3111,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_symlink_inode_operations; inode->i_mapping->a_ops = &btrfs_symlink_aops; inode->i_mapping->backing_dev_info = &root->fs_info->bdi; - inode->i_size = name_len - 1; + btrfs_i_size_write(inode, name_len - 1); err = btrfs_update_inode(trans, root, inode); if (err) drop_inode = 1; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 6513270f054..d86a953ae51 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -406,3 +406,92 @@ out: mutex_unlock(&tree->mutex); return entry; } + +int btrfs_ordered_update_i_size(struct inode *inode, + struct btrfs_ordered_extent *ordered) +{ + struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + u64 disk_i_size; + u64 new_i_size; + u64 i_size_test; + struct rb_node *node; + struct btrfs_ordered_extent *test; + + mutex_lock(&tree->mutex); + disk_i_size = BTRFS_I(inode)->disk_i_size; + + /* + * if the disk i_size is already at the inode->i_size, or + * this ordered extent is inside the disk i_size, we're done + */ + if (disk_i_size >= inode->i_size || + ordered->file_offset + ordered->len <= disk_i_size) { + goto out; + } + + /* + * we can't update the disk_isize if there are delalloc bytes + * between disk_i_size and this ordered extent + */ + if (test_range_bit(io_tree, disk_i_size, + ordered->file_offset + ordered->len - 1, + EXTENT_DELALLOC, 0)) { + goto out; + } + /* + * walk backward from this ordered extent to disk_i_size. + * if we find an ordered extent then we can't update disk i_size + * yet + */ + while(1) { + node = rb_prev(&ordered->rb_node); + if (!node) + break; + test = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (test->file_offset + test->len <= disk_i_size) + break; + if (test->file_offset >= inode->i_size) + break; + if (test->file_offset >= disk_i_size) + goto out; + } + new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode)); + + /* + * at this point, we know we can safely update i_size to at least + * the offset from this ordered extent. But, we need to + * walk forward and see if ios from higher up in the file have + * finished. + */ + node = rb_next(&ordered->rb_node); + i_size_test = 0; + if (node) { + /* + * do we have an area where IO might have finished + * between our ordered extent and the next one. + */ + test = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (test->file_offset > entry_end(ordered)) { + i_size_test = test->file_offset - 1; + } + } else { + i_size_test = i_size_read(inode); + } + + /* + * i_size_test is the end of a region after this ordered + * extent where there are no ordered extents. As long as there + * are no delalloc bytes in this area, it is safe to update + * disk_i_size to the end of the region. 
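+ *
+ * (Taken together, the checks above mean disk_i_size only ever moves
+ * forward, and only across a range whose ordered extents have all
+ * completed and which has no delalloc bytes outstanding.)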
+ */ + if (i_size_test > entry_end(ordered) && + !test_range_bit(io_tree, entry_end(ordered), i_size_test, + EXTENT_DELALLOC, 0)) { + new_i_size = min_t(u64, i_size_test, i_size_read(inode)); + } + BTRFS_I(inode)->disk_i_size = new_i_size; +out: + mutex_unlock(&tree->mutex); + return 0; +} diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 33292c5fe90..40e9126ad95 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -89,4 +89,6 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); int btrfs_add_ordered_pending(struct inode *inode, struct btrfs_ordered_extent *ordered, u64 start, u64 len); +int btrfs_ordered_update_i_size(struct inode *inode, + struct btrfs_ordered_extent *ordered); #endif -- cgit v1.2.3-70-g09d2 From ba1da2f442ec91a1534afa893f9bef7e33056ace Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 17 Jul 2008 12:54:15 -0400 Subject: Btrfs: Don't pin pages in ram until the entire ordered extent is on disk. Checksum items are not inserted until the entire ordered extent is on disk, but individual pages might be clean and available for reclaim long before the whole extent is on disk. In order to allow those pages to be freed, we need to be able to search the list of ordered extents to find the checksum that is going to be inserted in the tree. This way if the page needs to be read back in before the checksums are in the btree, we'll be able to verify the checksum on the page. This commit adds the ability to search the pending ordered extents for a given offset in the file, and changes btrfs_releasepage to allow ordered pages to be freed. Signed-off-by: Chris Mason --- fs/btrfs/file.c | 2 +- fs/btrfs/inode.c | 37 +++++++++++++++++++++---------------- fs/btrfs/ordered-data.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++-- fs/btrfs/ordered-data.h | 1 + 4 files changed, 69 insertions(+), 19 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index d6505892cd5..3e4e5c227c0 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -251,7 +251,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, end_of_last_block = start_pos + num_bytes - 1; lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_start_transaction(root, 1); if (!trans) { err = -ENOMEM; goto out_unlock; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0a687326c0b..293355c92a4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -382,7 +382,7 @@ mapit: return btrfs_map_bio(root, rw, bio, mirror_num, 0); } -static int add_pending_csums(struct btrfs_trans_handle *trans, +static noinline int add_pending_csums(struct btrfs_trans_handle *trans, struct inode *inode, u64 file_offset, struct list_head *list) { @@ -390,15 +390,12 @@ static int add_pending_csums(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum *sum; btrfs_set_trans_block_group(trans, inode); - while(!list_empty(list)) { - cur = list->next; + list_for_each(cur, list) { sum = list_entry(cur, struct btrfs_ordered_sum, list); mutex_lock(&BTRFS_I(inode)->csum_mutex); btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root, inode, sum); mutex_unlock(&BTRFS_I(inode)->csum_mutex); - list_del(&sum->list); - kfree(sum); } return 0; } @@ -498,9 +495,8 @@ int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, int ret; ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); - if (!ret) { + if (!ret) return 0; - } trans = 
btrfs_join_transaction(root, 1); @@ -571,6 +567,18 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) path = btrfs_alloc_path(); item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0); if (IS_ERR(item)) { + /* + * It is possible there is an ordered extent that has + * not yet finished for this range in the file. If so, + * that extent will have a csum cached, and it will insert + * the sum after all the blocks in the extent are fully + * on disk. So, look for an ordered extent and use the + * sum if found. + */ + ret = btrfs_find_ordered_sum(inode, start, &csum); + if (ret == 0) + goto found; + ret = PTR_ERR(item); /* a csum that isn't present is a preallocated region. */ if (ret == -ENOENT || ret == -EFBIG) @@ -582,6 +590,7 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) } read_extent_buffer(path->nodes[0], &csum, (unsigned long)item, BTRFS_CRC32_SIZE); +found: set_state_private(io_tree, start, csum); out: if (path) @@ -888,7 +897,7 @@ static void fill_inode_item(struct extent_buffer *leaf, BTRFS_I(inode)->block_group->key.objectid); } -int btrfs_update_inode(struct btrfs_trans_handle *trans, +int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { @@ -1567,6 +1576,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) inode->i_mapping, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); + btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); mutex_init(&BTRFS_I(inode)->csum_mutex); return 0; } @@ -1868,6 +1878,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode->i_mapping, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); + btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->disk_i_size = 0; @@ -2097,6 +2108,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->disk_i_size = 0; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; + btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); } dir->i_sb->s_dirt = 1; btrfs_update_inode_block_group(trans, inode); @@ -2618,14 +2630,6 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) { - struct btrfs_ordered_extent *ordered; - - ordered = btrfs_lookup_ordered_extent(page->mapping->host, - page_offset(page)); - if (ordered) { - btrfs_put_ordered_extent(ordered); - return 0; - } return __btrfs_releasepage(page, gfp_flags); } @@ -3078,6 +3082,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->disk_i_size = 0; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; + btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); } dir->i_sb->s_dirt = 1; btrfs_update_inode_block_group(trans, inode); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index d86a953ae51..b739e3abebb 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -245,8 +245,18 @@ out: int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) { - if (atomic_dec_and_test(&entry->refs)) + struct list_head *cur; + struct btrfs_ordered_sum *sum; + + if (atomic_dec_and_test(&entry->refs)) { + while(!list_empty(&entry->list)) { + cur = entry->list.next; + sum = list_entry(cur, struct 
btrfs_ordered_sum, list); + list_del(&sum->list); + kfree(sum); + } kfree(entry); + } return 0; } @@ -444,8 +454,9 @@ int btrfs_ordered_update_i_size(struct inode *inode, * if we find an ordered extent then we can't update disk i_size * yet */ + node = &ordered->rb_node; while(1) { - node = rb_prev(&ordered->rb_node); + node = rb_prev(node); if (!node) break; test = rb_entry(node, struct btrfs_ordered_extent, rb_node); @@ -495,3 +506,36 @@ out: mutex_unlock(&tree->mutex); return 0; } + +int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) +{ + struct btrfs_ordered_sum *ordered_sum; + struct btrfs_sector_sum *sector_sums; + struct btrfs_ordered_extent *ordered; + struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; + struct list_head *cur; + int ret = 1; + int index; + + ordered = btrfs_lookup_ordered_extent(inode, offset); + if (!ordered) + return 1; + + mutex_lock(&tree->mutex); + list_for_each_prev(cur, &ordered->list) { + ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list); + if (offset >= ordered_sum->file_offset && + offset < ordered_sum->file_offset + ordered_sum->len) { + index = (offset - ordered_sum->file_offset) / + BTRFS_I(inode)->root->sectorsize;; + sector_sums = &ordered_sum->sums; + *sum = sector_sums[index].sum; + ret = 0; + goto out; + } + } +out: + mutex_unlock(&tree->mutex); + return ret; +} + diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 40e9126ad95..33f0d9e91b1 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -91,4 +91,5 @@ int btrfs_add_ordered_pending(struct inode *inode, u64 start, u64 len); int btrfs_ordered_update_i_size(struct inode *inode, struct btrfs_ordered_extent *ordered); +int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum); #endif -- cgit v1.2.3-70-g09d2 From eb84ae039e10f1f80443d846ba1350122bbdc753 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 17 Jul 2008 13:53:27 -0400 Subject: Btrfs: Cleanup and comment ordered-data.c Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 12 ++++- fs/btrfs/ordered-data.c | 130 ++++++++++++++++++++++++++---------------------- fs/btrfs/ordered-data.h | 49 +++++++++++++++--- 3 files changed, 121 insertions(+), 70 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0d1e2ba47a9..f37e09e724f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1343,7 +1343,7 @@ again: unlock_extent(io_tree, page_start, page_end, GFP_NOFS); unlock_page(page); page_cache_release(page); - btrfs_wait_ordered_extent(inode, ordered); + btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); goto again; } @@ -2660,6 +2660,10 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) ordered = btrfs_lookup_ordered_extent(page->mapping->host, page_offset(page)); if (ordered) { + /* + * IO on this page will never be started, so we need + * to account for any ordered extents now + */ clear_extent_bit(tree, page_start, page_end, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_LOCKED, 1, 0, GFP_NOFS); @@ -2732,11 +2736,15 @@ again: lock_extent(io_tree, page_start, page_end, GFP_NOFS); set_page_extent_mapped(page); + /* + * we can't set the delalloc bits if there are pending ordered + * extents. 
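+ * (an ordered extent means writeback is already in flight for part of
+ * this range, and the delalloc accounting assumes the two never overlap).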
Drop our locks and wait for them to finish + */ ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { unlock_extent(io_tree, page_start, page_end, GFP_NOFS); unlock_page(page); - btrfs_wait_ordered_extent(inode, ordered); + btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); goto again; } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index b739e3abebb..230fd3ca6b2 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -136,6 +136,19 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, return ret; } +/* allocate and add a new ordered_extent into the per-inode tree. + * file_offset is the logical offset in the file + * + * start is the disk block number of an extent already reserved in the + * extent allocation tree + * + * len is the length of the extent + * + * This also sets the EXTENT_ORDERED bit on the range in the inode. + * + * The tree is given a single reference on the ordered extent that was + * inserted. + */ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len) { @@ -152,7 +165,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->file_offset = file_offset; entry->start = start; entry->len = len; - entry->inode = inode; /* one ref for the tree */ atomic_set(&entry->refs, 1); init_waitqueue_head(&entry->wait); @@ -167,12 +179,15 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset, entry_end(entry) - 1, GFP_NOFS); - set_bit(BTRFS_ORDERED_START, &entry->flags); mutex_unlock(&tree->mutex); BUG_ON(node); return 0; } +/* + * Add a struct btrfs_ordered_sum into the list of checksums to be inserted + * when an ordered extent is finished. + */ int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum) { struct btrfs_ordered_inode_tree *tree; @@ -182,29 +197,25 @@ int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum) tree = &BTRFS_I(inode)->ordered_tree; mutex_lock(&tree->mutex); node = tree_search(tree, sum->file_offset); - if (!node) { -search_fail: -printk("add ordered sum failed to find a node for inode %lu offset %Lu\n", inode->i_ino, sum->file_offset); - node = rb_first(&tree->tree); - while(node) { - entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); - printk("entry %Lu %Lu %Lu\n", entry->file_offset, entry->file_offset + entry->len, entry->start); - node = rb_next(node); - } - BUG(); - } BUG_ON(!node); entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); - if (!offset_in_entry(entry, sum->file_offset)) { - goto search_fail; - } + BUG_ON(!offset_in_entry(entry, sum->file_offset)); list_add_tail(&sum->list, &entry->list); mutex_unlock(&tree->mutex); return 0; } +/* + * this is used to account for finished IO across a given range + * of the file. The IO should not span ordered extents. If + * a given ordered_extent is completely done, 1 is returned, otherwise + * 0. + * + * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used + * to make sure this function only returns 1 once for a given ordered extent. 
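+ *
+ * A typical caller (see btrfs_writepage_end_io_hook above) is:
+ *
+ *	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
+ *	if (!ret)
+ *		return 0;
+ *	... join a transaction, insert csums and the file extent item ...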
+ */ int btrfs_dec_test_ordered_pending(struct inode *inode, u64 file_offset, u64 io_size) { @@ -233,9 +244,6 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, ret = test_range_bit(io_tree, entry->file_offset, entry->file_offset + entry->len - 1, EXTENT_ORDERED, 0); - if (!test_bit(BTRFS_ORDERED_START, &entry->flags)) { -printk("inode %lu not ready yet for extent %Lu %Lu\n", inode->i_ino, entry->file_offset, entry_end(entry)); - } if (ret == 0) ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); out: @@ -243,6 +251,10 @@ out: return ret == 0; } +/* + * used to drop a reference on an ordered extent. This will free + * the extent if the last reference is dropped + */ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) { struct list_head *cur; @@ -260,6 +272,10 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) return 0; } +/* + * remove an ordered extent from the tree. No references are dropped + * but, anyone waiting on this extent is woken up. + */ int btrfs_remove_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry) { @@ -277,27 +293,25 @@ int btrfs_remove_ordered_extent(struct inode *inode, return 0; } -void btrfs_wait_ordered_extent(struct inode *inode, - struct btrfs_ordered_extent *entry) -{ - u64 start = entry->file_offset; - u64 end = start + entry->len - 1; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) - do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); -#else - do_sync_mapping_range(inode->i_mapping, start, end, - SYNC_FILE_RANGE_WRITE); -#endif - wait_event(entry->wait, - test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); -} - -static void btrfs_start_ordered_extent(struct inode *inode, - struct btrfs_ordered_extent *entry, int wait) +/* + * Used to start IO or wait for a given ordered extent to finish. + * + * If wait is one, this effectively waits on page writeback for all the pages + * in the extent, and it waits on the io completion code to insert + * metadata into the btree corresponding to the extent + */ +void btrfs_start_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry, + int wait) { u64 start = entry->file_offset; u64 end = start + entry->len - 1; + /* + * pages in the range can be dirty, clean or writeback. We + * start IO on any dirty ones so the wait doesn't stall waiting + * for pdflush to find them + */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); #else @@ -309,6 +323,9 @@ static void btrfs_start_ordered_extent(struct inode *inode, &entry->flags)); } +/* + * Used to wait on ordered extents across a large range of bytes. + */ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) { u64 end; @@ -349,31 +366,11 @@ again: } } -int btrfs_add_ordered_pending(struct inode *inode, - struct btrfs_ordered_extent *ordered, - u64 start, u64 len) -{ - WARN_ON(1); - return 0; -#if 0 - int ret; - struct btrfs_ordered_inode_tree *tree; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - - tree = &BTRFS_I(inode)->ordered_tree; - mutex_lock(&tree->mutex); - if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) { - ret = -EAGAIN; - goto out; - } - set_extent_ordered(io_tree, start, start + len - 1, GFP_NOFS); - ret = 0; -out: - mutex_unlock(&tree->mutex); - return ret; -#endif -} +/* + * find an ordered extent corresponding to file_offset. 
return NULL if + * nothing is found, otherwise take a reference on the extent and return it + */ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, u64 file_offset) { @@ -397,6 +394,10 @@ out: return entry; } +/* + * lookup and return any extent before 'file_offset'. NULL is returned + * if none is found + */ struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset) { @@ -417,6 +418,10 @@ out: return entry; } +/* + * After an extent is done, call this to conditionally update the on disk + * i_size. i_size is updated to cover any fully written part of the file. + */ int btrfs_ordered_update_i_size(struct inode *inode, struct btrfs_ordered_extent *ordered) { @@ -507,6 +512,11 @@ out: return 0; } +/* + * search the ordered extents for one corresponding to 'offset' and + * try to find a checksum. This is used because we allow pages to + * be reclaimed before their checksum is actually put into the btree + */ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) { struct btrfs_ordered_sum *ordered_sum; diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 33f0d9e91b1..98f491d1022 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -19,12 +19,19 @@ #ifndef __BTRFS_ORDERED_DATA__ #define __BTRFS_ORDERED_DATA__ +/* one of these per inode */ struct btrfs_ordered_inode_tree { struct mutex mutex; struct rb_root tree; struct rb_node *last; }; +/* + * these are used to collect checksums done just before bios submission. + * They are attached via a list into the ordered extent, and + * checksum items are inserted into the tree after all the blocks in + * the ordered extent are on disk + */ struct btrfs_sector_sum { u64 offset; u32 sum; @@ -34,27 +41,56 @@ struct btrfs_ordered_sum { u64 file_offset; u64 len; struct list_head list; + /* last field is a variable length array of btrfs_sector_sums */ struct btrfs_sector_sum sums; }; -/* bits for the flags field */ +/* + * bits for the flags field: + * + * BTRFS_ORDERED_IO_DONE is set when all of the blocks are written. + * It is used to make sure metadata is inserted into the tree only once + * per extent. + * + * BTRFS_ORDERED_COMPLETE is set when the extent is removed from the + * rbtree, just before waking any waiters. It is used to indicate the + * IO is done and any metadata is inserted into the tree. 
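+ *
+ * The lifecycle of an entry is therefore: added by btrfs_add_ordered_extent,
+ * IO_DONE set exactly once when the last block is written, checksums and
+ * the file extent item inserted, then COMPLETE set as the entry is removed
+ * from the rbtree and any waiters are woken.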
+ */ #define BTRFS_ORDERED_IO_DONE 0 /* set when all the pages are written */ + #define BTRFS_ORDERED_COMPLETE 1 /* set when removed from the tree */ -#define BTRFS_ORDERED_START 2 /* set when tree setup */ struct btrfs_ordered_extent { + /* logical offset in the file */ u64 file_offset; + + /* disk byte number */ u64 start; + + /* length of the extent in bytes */ u64 len; + + /* flags (described above) */ unsigned long flags; + + /* reference count */ atomic_t refs; + + /* list of checksums for insertion when the extent io is done */ struct list_head list; - struct inode *inode; + + /* used to wait for the BTRFS_ORDERED_COMPLETE bit */ wait_queue_head_t wait; + + /* our friendly rbtree entry */ struct rb_node rb_node; }; +/* + * calculates the total size you need to allocate for an ordered sum + * structure spanning 'bytes' in the file + */ static inline int btrfs_ordered_sum_size(struct btrfs_root *root, u64 bytes) { unsigned long num_sectors = (bytes + root->sectorsize - 1) / @@ -81,14 +117,11 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum); struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, u64 file_offset); -void btrfs_wait_ordered_extent(struct inode *inode, - struct btrfs_ordered_extent *entry); +void btrfs_start_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry, int wait); void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); -int btrfs_add_ordered_pending(struct inode *inode, - struct btrfs_ordered_extent *ordered, - u64 start, u64 len); int btrfs_ordered_update_i_size(struct inode *inode, struct btrfs_ordered_extent *ordered); int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum); -- cgit v1.2.3-70-g09d2 From 3edf7d33f4edb1e4a9bb0a4c0a84d95fb4d22a09 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 18 Jul 2008 06:17:13 -0400 Subject: Btrfs: Handle data checksumming on bios that span multiple ordered extents Data checksumming is done right before the bio is sent down the IO stack, which means a single bio might span more than one ordered extent. In this case, the checksumming data is split between two ordered extents. 
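The core of the split, condensed from the btrfs_csum_one_bio() hunk below into a sketch (the real loop also tracks total_bytes and this_sum_bytes and fills in the per-sector sums):

	offset = page_offset(bvec->bv_page) + bvec->bv_offset;
	if (offset >= ordered->file_offset + ordered->len) {
		/* this page starts the next ordered extent: queue the
		 * sums collected so far and begin a fresh list */
		btrfs_add_ordered_sum(inode, ordered, sums);
		btrfs_put_ordered_extent(ordered);

		sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
			       GFP_NOFS);
		sums->file_offset = offset;
		ordered = btrfs_lookup_ordered_extent(inode,
						      sums->file_offset);
	}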
Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/file-item.c | 43 +++++++++++++++++++++++++++++++++++++++---- fs/btrfs/inode.c | 6 +----- fs/btrfs/ordered-data.c | 36 ++++++++++++++++++------------------ fs/btrfs/ordered-data.h | 11 +++++++++-- 5 files changed, 69 insertions(+), 31 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index acbce542d29..96ab2797c09 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1579,8 +1579,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_ordered_sum *sums); -int btrfs_csum_one_bio(struct btrfs_root *root, - struct bio *bio, struct btrfs_ordered_sum **sums_ret); +int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, + struct bio *bio); struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 345caf8ff51..e02f1e5acb0 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -134,26 +134,53 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, return ret; } -int btrfs_csum_one_bio(struct btrfs_root *root, - struct bio *bio, struct btrfs_ordered_sum **sums_ret) +int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, + struct bio *bio) { struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; + struct btrfs_ordered_extent *ordered; char *data; struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; + unsigned long total_bytes = 0; + unsigned long this_sum_bytes = 0; + u64 offset; WARN_ON(bio->bi_vcnt <= 0); sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); if (!sums) return -ENOMEM; - *sums_ret = sums; + sector_sum = &sums->sums; - sums->file_offset = page_offset(bvec->bv_page); + sums->file_offset = page_offset(bvec->bv_page) + bvec->bv_offset; sums->len = bio->bi_size; INIT_LIST_HEAD(&sums->list); + ordered = btrfs_lookup_ordered_extent(inode, sums->file_offset); + BUG_ON(!ordered); while(bio_index < bio->bi_vcnt) { + offset = page_offset(bvec->bv_page) + bvec->bv_offset; + if (offset >= ordered->file_offset + ordered->len) { + unsigned long bytes_left; + sums->len = this_sum_bytes; + this_sum_bytes = 0; + btrfs_add_ordered_sum(inode, ordered, sums); + btrfs_put_ordered_extent(ordered); + + bytes_left = bio->bi_size - total_bytes; + + sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), + GFP_NOFS); + BUG_ON(!sums); + sector_sum = &sums->sums; + sums->len = bytes_left; + sums->file_offset = offset; + ordered = btrfs_lookup_ordered_extent(inode, + sums->file_offset); + BUG_ON(!ordered); + } + data = kmap_atomic(bvec->bv_page, KM_USER0); sector_sum->sum = ~(u32)0; sector_sum->sum = btrfs_csum_data(root, @@ -165,10 +192,18 @@ int btrfs_csum_one_bio(struct btrfs_root *root, (char *)§or_sum->sum); sector_sum->offset = page_offset(bvec->bv_page) + bvec->bv_offset; + sector_sum++; bio_index++; + total_bytes += bvec->bv_len; + this_sum_bytes += bvec->bv_len; bvec++; } + btrfs_add_ordered_sum(inode, ordered, sums); + btrfs_put_ordered_extent(ordered); + if (total_bytes != bio->bi_size) { +printk("warning, total bytes %lu bio size %u\n", total_bytes, bio->bi_size); + } return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f37e09e724f..4d729d90d2b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -351,12 +351,8 @@ int 
__btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - struct btrfs_ordered_sum *sums; - ret = btrfs_csum_one_bio(root, bio, &sums); - BUG_ON(ret); - - ret = btrfs_add_ordered_sum(inode, sums); + ret = btrfs_csum_one_bio(root, inode, bio); BUG_ON(ret); return btrfs_map_bio(root, rw, bio, mirror_num, 1); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 230fd3ca6b2..1ddb7bceea9 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -186,22 +186,17 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, /* * Add a struct btrfs_ordered_sum into the list of checksums to be inserted - * when an ordered extent is finished. + * when an ordered extent is finished. If the list covers more than one + * ordered extent, it is split across multiples. */ -int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum) +int btrfs_add_ordered_sum(struct inode *inode, + struct btrfs_ordered_extent *entry, + struct btrfs_ordered_sum *sum) { struct btrfs_ordered_inode_tree *tree; - struct rb_node *node; - struct btrfs_ordered_extent *entry; tree = &BTRFS_I(inode)->ordered_tree; mutex_lock(&tree->mutex); - node = tree_search(tree, sum->file_offset); - BUG_ON(!node); - - entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); - BUG_ON(!offset_in_entry(entry, sum->file_offset)); - list_add_tail(&sum->list, &entry->list); mutex_unlock(&tree->mutex); return 0; @@ -524,8 +519,10 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) struct btrfs_ordered_extent *ordered; struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; struct list_head *cur; + unsigned long num_sectors; + unsigned long i; + u32 sectorsize = BTRFS_I(inode)->root->sectorsize; int ret = 1; - int index; ordered = btrfs_lookup_ordered_extent(inode, offset); if (!ordered) @@ -534,14 +531,17 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) mutex_lock(&tree->mutex); list_for_each_prev(cur, &ordered->list) { ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list); - if (offset >= ordered_sum->file_offset && - offset < ordered_sum->file_offset + ordered_sum->len) { - index = (offset - ordered_sum->file_offset) / - BTRFS_I(inode)->root->sectorsize;; + if (offset >= ordered_sum->file_offset) { + num_sectors = ordered_sum->len / sectorsize; sector_sums = &ordered_sum->sums; - *sum = sector_sums[index].sum; - ret = 0; - goto out; + for (i = 0; i < num_sectors; i++) { + if (sector_sums[i].offset == offset) { +printk("find ordered sum inode %lu offset %Lu\n", inode->i_ino, offset); + *sum = sector_sums[i].sum; + ret = 0; + goto out; + } + } } } out: diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 98f491d1022..1794efd13ca 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -39,7 +39,11 @@ struct btrfs_sector_sum { struct btrfs_ordered_sum { u64 file_offset; - u64 len; + /* + * this is the length in bytes covered by the sums array below. + * But, the sums array may not be contiguous in the file. 
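+ * (Each btrfs_sector_sum records its own file offset, so holes are fine;
+ * btrfs_find_ordered_sum matches on sector_sums[i].offset instead of
+ * assuming a dense array.)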
+ */ + unsigned long len; struct list_head list; /* last field is a variable length array of btrfs_sector_sums */ struct btrfs_sector_sum sums; @@ -95,6 +99,7 @@ static inline int btrfs_ordered_sum_size(struct btrfs_root *root, u64 bytes) { unsigned long num_sectors = (bytes + root->sectorsize - 1) / root->sectorsize; + num_sectors++; return sizeof(struct btrfs_ordered_sum) + num_sectors * sizeof(struct btrfs_sector_sum); } @@ -114,7 +119,9 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len); -int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum); +int btrfs_add_ordered_sum(struct inode *inode, + struct btrfs_ordered_extent *entry, + struct btrfs_ordered_sum *sum); struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, u64 file_offset); void btrfs_start_ordered_extent(struct inode *inode, -- cgit v1.2.3-70-g09d2 From e5a2217ef6ff088d08a27208929a6f9c635d672c Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 18 Jul 2008 20:42:20 -0400 Subject: Fix btrfs_wait_ordered_extent_range to properly wait Signed-off-by: Chris Mason --- fs/btrfs/file-item.c | 3 ++- fs/btrfs/inode.c | 26 ++++++++++++++++++++------ fs/btrfs/ordered-data.c | 45 +++++++++++++++++++++++++++------------------ 3 files changed, 49 insertions(+), 25 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index d9c69e16d36..45127e4797c 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -161,7 +161,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, while(bio_index < bio->bi_vcnt) { offset = page_offset(bvec->bv_page) + bvec->bv_offset; - if (offset >= ordered->file_offset + ordered->len) { + if (offset >= ordered->file_offset + ordered->len || + offset < ordered->file_offset) { unsigned long bytes_left; sums->len = this_sum_bytes; this_sum_bytes = 0; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 08dbe738b51..50ee4befac8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -128,7 +128,9 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) goto out; BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); + mutex_lock(&BTRFS_I(inode)->extent_mutex); btrfs_drop_extent_cache(inode, start, start + num_bytes - 1); + mutex_unlock(&BTRFS_I(inode)->extent_mutex); while(num_bytes > 0) { cur_alloc_size = min(num_bytes, root->fs_info->max_extent); @@ -144,6 +146,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) em->len = ins.offset; em->block_start = ins.objectid; em->bdev = root->fs_info->fs_devices->latest_bdev; + mutex_lock(&BTRFS_I(inode)->extent_mutex); set_bit(EXTENT_FLAG_PINNED, &em->flags); while(1) { spin_lock(&em_tree->lock); @@ -156,6 +159,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) btrfs_drop_extent_cache(inode, start, start + ins.offset - 1); } + mutex_unlock(&BTRFS_I(inode)->extent_mutex); cur_alloc_size = ins.offset; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, @@ -487,6 +491,8 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; u64 alloc_hint = 0; + u64 clear_start; + u64 clear_end; struct list_head list; struct btrfs_key ins; int ret; @@ -509,12 +515,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ins.objectid = 
ordered_extent->start; ins.offset = ordered_extent->len; ins.type = BTRFS_EXTENT_ITEM_KEY; + ret = btrfs_alloc_reserved_extent(trans, root, root->root_key.objectid, trans->transid, inode->i_ino, ordered_extent->file_offset, &ins); BUG_ON(ret); mutex_lock(&BTRFS_I(inode)->extent_mutex); + ret = btrfs_drop_extents(trans, root, inode, ordered_extent->file_offset, ordered_extent->file_offset + @@ -528,13 +536,19 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->len, 0); BUG_ON(ret); - spin_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, ordered_extent->file_offset, - ordered_extent->len); - if (em) { - clear_bit(EXTENT_FLAG_PINNED, &em->flags); - free_extent_map(em); + clear_start = ordered_extent->file_offset; + clear_end = ordered_extent->file_offset + ordered_extent->len; + while(clear_start < clear_end) { + em = lookup_extent_mapping(em_tree, clear_start, + clear_end - clear_start); + if (em) { + clear_bit(EXTENT_FLAG_PINNED, &em->flags); + clear_start = em->start + em->len; + free_extent_map(em); + } else { + break; + } } spin_unlock(&em_tree->lock); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 1ddb7bceea9..c2b4a9c4ddb 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -324,22 +324,37 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) { u64 end; + u64 orig_end; + u64 wait_end; struct btrfs_ordered_extent *ordered; - int found; - int should_wait = 0; - -again: - if (start + len < start) - end = (u64)-1; - else - end = start + len - 1; - found = 0; + u64 mask = BTRFS_I(inode)->root->sectorsize - 1; + + if (start + len < start) { + wait_end = (inode->i_size + mask) & ~mask; + orig_end = (u64)-1; + } else { + orig_end = start + len - 1; + wait_end = orig_end; + } + + /* start IO across the range first to instantiate any delalloc + * extents + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + do_sync_file_range(file, start, wait_end, SYNC_FILE_RANGE_WRITE); +#else + do_sync_mapping_range(inode->i_mapping, start, wait_end, + SYNC_FILE_RANGE_WRITE); +#endif + end = orig_end; + wait_on_extent_writeback(&BTRFS_I(inode)->io_tree, start, orig_end); + while(1) { ordered = btrfs_lookup_first_ordered_extent(inode, end); if (!ordered) { break; } - if (ordered->file_offset >= start + len) { + if (ordered->file_offset > orig_end) { btrfs_put_ordered_extent(ordered); break; } @@ -347,21 +362,15 @@ again: btrfs_put_ordered_extent(ordered); break; } - btrfs_start_ordered_extent(inode, ordered, should_wait); - found++; + btrfs_start_ordered_extent(inode, ordered, 1); end = ordered->file_offset; btrfs_put_ordered_extent(ordered); - if (end == 0) + if (end == 0 || end == start) break; end--; } - if (should_wait && found) { - should_wait = 0; - goto again; - } } - /* * find an ordered extent corresponding to file_offset. return NULL if nothing is found, otherwise take a reference on the extent and return it -- cgit v1.2.3-70-g09d2 From 4a09675279674041862d2210635b0cc1f60be28e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 21 Jul 2008 10:29:44 -0400 Subject: Btrfs: Data ordered fixes * In btrfs_delete_inode, wait for ordered extents after calling truncate_inode_pages. This is much faster, and more correct * Properly clear the PageChecked bit everywhere we redirty the page. 
* Change the writepage fixup handler to lock the page range and check to see if an ordered extent had been inserted since the improperly dirtied page was discovered * Wait for ordered extents outside the transaction. This isn't required for locking rules but does improve transaction latencies * Reduce contention on the alloc_mutex by dropping it while incrementing refs on a node/leaf and while dropping refs on a leaf. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 18 +++++++++++++++--- fs/btrfs/file.c | 1 + fs/btrfs/inode.c | 23 ++++++++++++++++++----- fs/btrfs/ordered-data.c | 11 +++++++++-- 4 files changed, 43 insertions(+), 10 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index be2aef1cb7a..ccd49322f79 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -934,7 +934,6 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (!root->ref_cows) return 0; - mutex_lock(&root->fs_info->alloc_mutex); level = btrfs_header_level(buf); nritems = btrfs_header_nritems(buf); for (i = 0; i < nritems; i++) { @@ -951,29 +950,36 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); if (disk_bytenr == 0) continue; + + mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr, btrfs_file_extent_disk_num_bytes(buf, fi), root->root_key.objectid, trans->transid, key.objectid, key.offset); + mutex_unlock(&root->fs_info->alloc_mutex); if (ret) { faili = i; + WARN_ON(1); goto fail; } } else { bytenr = btrfs_node_blockptr(buf, i); btrfs_node_key_to_cpu(buf, &key, i); + + mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_inc_extent_ref(trans, root, bytenr, btrfs_level_size(root, level - 1), root->root_key.objectid, trans->transid, level - 1, key.objectid); + mutex_unlock(&root->fs_info->alloc_mutex); if (ret) { faili = i; + WARN_ON(1); goto fail; } } } - mutex_unlock(&root->fs_info->alloc_mutex); return 0; fail: WARN_ON(1); @@ -1004,7 +1010,6 @@ fail: } } #endif - mutex_unlock(&root->fs_info->alloc_mutex); return ret; } @@ -2180,6 +2185,8 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans, leaf_owner = btrfs_header_owner(leaf); leaf_generation = btrfs_header_generation(leaf); + mutex_unlock(&root->fs_info->alloc_mutex); + for (i = 0; i < nritems; i++) { u64 disk_bytenr; @@ -2197,12 +2204,17 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans, disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); if (disk_bytenr == 0) continue; + + mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_free_extent(trans, root, disk_bytenr, btrfs_file_extent_disk_num_bytes(leaf, fi), leaf_owner, leaf_generation, key.objectid, key.offset, 0); + mutex_unlock(&root->fs_info->alloc_mutex); BUG_ON(ret); } + + mutex_lock(&root->fs_info->alloc_mutex); return 0; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index eccdb9562ba..591a30208ac 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -75,6 +75,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages) for (i = 0; i < num_pages; i++) { if (!pages[i]) break; + ClearPageChecked(pages[i]); unlock_page(pages[i]); mark_page_accessed(pages[i]); page_cache_release(pages[i]); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 50ee4befac8..8fb6dc25e7a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -418,7 +418,7 @@ void btrfs_writepage_fixup_worker(struct btrfs_work *work) fixup = 
container_of(work, struct btrfs_writepage_fixup, work); page = fixup->page; - +again: lock_page(page); if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { ClearPageChecked(page); @@ -430,9 +430,21 @@ void btrfs_writepage_fixup_worker(struct btrfs_work *work) page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); - ordered = btrfs_lookup_ordered_extent(inode, page_start); - if (ordered) + + /* already ordered? We're done */ + if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_ORDERED, 0)) { goto out; + } + + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { + unlock_extent(&BTRFS_I(inode)->io_tree, page_start, + page_end, GFP_NOFS); + unlock_page(page); + btrfs_start_ordered_extent(inode, ordered, 1); + goto again; + } set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); @@ -1465,11 +1477,11 @@ void btrfs_delete_inode(struct inode *inode) unsigned long nr; int ret; - btrfs_wait_ordered_range(inode, 0, (u64)-1); truncate_inode_pages(&inode->i_data, 0); if (is_bad_inode(inode)) { goto no_delete; } + btrfs_wait_ordered_range(inode, 0, (u64)-1); btrfs_i_size_write(inode, 0); trans = btrfs_start_transaction(root, 1); @@ -2707,6 +2719,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) 1, 1, GFP_NOFS); __btrfs_releasepage(page, GFP_NOFS); + ClearPageChecked(page); if (PagePrivate(page)) { invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE); @@ -2818,10 +2831,10 @@ static void btrfs_truncate(struct inode *inode) return; btrfs_truncate_page(inode->i_mapping, inode->i_size); + btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); - btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_i_size_write(inode, inode->i_size); /* FIXME, add redo link to tree so we don't leak on crash */ diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index c2b4a9c4ddb..0d87795fdd8 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -336,7 +336,7 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) orig_end = start + len - 1; wait_end = orig_end; } - +again: /* start IO across the range first to instantiate any delalloc * extents */ @@ -369,6 +369,14 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) break; end--; } + if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end, + EXTENT_ORDERED | EXTENT_DELALLOC, 0)) { + printk("inode %lu still ordered or delalloc after wait " + "%llu %llu\n", inode->i_ino, + (unsigned long long)start, + (unsigned long long)orig_end); + goto again; + } } /* @@ -545,7 +553,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) sector_sums = &ordered_sum->sums; for (i = 0; i < num_sectors; i++) { if (sector_sums[i].offset == offset) { -printk("find ordered sum inode %lu offset %Lu\n", inode->i_ino, offset); *sum = sector_sums[i].sum; ret = 0; goto out; -- cgit v1.2.3-70-g09d2 From f421950f86bf96a11fef932e167ab2e70d4c43a0 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 22 Jul 2008 11:18:09 -0400 Subject: Btrfs: Fix some data=ordered related data corruptions Stress testing was showing data checksum errors, most of which were caused by a lookup bug in the extent_map tree. The tree was caching the last pointer returned, and searches would check the last pointer first. 
But, search callers also expect the search to return the very first matching extent in the range, which wasn't always true with the last pointer usage. For now, the code to cache the last return value is just removed. It is easy to fix, but I think lookups are rare enough that it isn't required anymore. This commit also replaces do_sync_mapping_range with a local copy of the related functions. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 + fs/btrfs/extent_io.c | 20 --------- fs/btrfs/extent_io.h | 1 - fs/btrfs/extent_map.c | 9 ---- fs/btrfs/extent_map.h | 1 - fs/btrfs/file.c | 15 +++---- fs/btrfs/inode.c | 54 +++++++++++++---------- fs/btrfs/ordered-data.c | 115 ++++++++++++++++++++++++++++++++++++++++-------- fs/btrfs/ordered-data.h | 4 ++ fs/btrfs/transaction.c | 2 - 10 files changed, 140 insertions(+), 83 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 96ab2797c09..f8fccdac305 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1590,6 +1590,8 @@ int btrfs_csum_truncate(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 isize); /* inode.c */ +int btrfs_writepages(struct address_space *mapping, + struct writeback_control *wbc); int btrfs_create_subvol_root(struct btrfs_root *new_root, struct btrfs_trans_handle *trans, u64 new_dirid, struct btrfs_block_group_cache *block_group); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7380449cb5b..9965993748d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -97,7 +97,6 @@ void extent_io_tree_init(struct extent_io_tree *tree, spin_lock_init(&tree->lock); spin_lock_init(&tree->buffer_lock); tree->mapping = mapping; - tree->last = NULL; } EXPORT_SYMBOL(extent_io_tree_init); @@ -173,12 +172,6 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, struct tree_entry *entry; struct tree_entry *prev_entry = NULL; - if (tree->last) { - struct extent_state *state; - state = tree->last; - if (state->start <= offset && offset <= state->end) - return &tree->last->rb_node; - } while(n) { entry = rb_entry(n, struct tree_entry, rb_node); prev = n; @@ -189,7 +182,6 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, else if (offset > entry->end) n = n->rb_right; else { - tree->last = rb_entry(n, struct extent_state, rb_node); return n; } } @@ -223,10 +215,6 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree, ret = __etree_search(tree, offset, &prev, NULL); if (!ret) { - if (prev) { - tree->last = rb_entry(prev, struct extent_state, - rb_node); - } return prev; } return ret; @@ -301,8 +289,6 @@ static int merge_state(struct extent_io_tree *tree, other->state == state->state) { state->start = other->start; other->tree = NULL; - if (tree->last == other) - tree->last = state; rb_erase(&other->rb_node, &tree->state); free_extent_state(other); } @@ -314,8 +300,6 @@ static int merge_state(struct extent_io_tree *tree, other->state == state->state) { other->start = state->start; state->tree = NULL; - if (tree->last == state) - tree->last = other; rb_erase(&state->rb_node, &tree->state); free_extent_state(state); } @@ -378,7 +362,6 @@ static int insert_state(struct extent_io_tree *tree, return -EEXIST; } state->tree = tree; - tree->last = state; merge_state(tree, state); return 0; } @@ -444,9 +427,6 @@ static int clear_state_bit(struct extent_io_tree *tree, if (delete || state->state == 0) { if (state->tree) { clear_state_cb(tree, state, state->state); - if 
(tree->last == state) { - tree->last = extent_state_next(state); - } rb_erase(&state->rb_node, &tree->state); state->tree = NULL; free_extent_state(state); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 6c03e6a1993..315cfceae31 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -60,7 +60,6 @@ struct extent_io_tree { spinlock_t lock; spinlock_t buffer_lock; struct extent_io_ops *ops; - struct extent_state *last; }; struct extent_state { diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 71b1ac15535..8a502ee2f23 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -42,7 +42,6 @@ void extent_map_exit(void) void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) { tree->map.rb_node = NULL; - tree->last = NULL; spin_lock_init(&tree->lock); } EXPORT_SYMBOL(extent_map_tree_init); @@ -239,7 +238,6 @@ int add_extent_mapping(struct extent_map_tree *tree, merge->in_tree = 0; free_extent_map(merge); } - tree->last = em; out: return ret; } @@ -273,10 +271,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, u64 end = range_end(start, len); BUG_ON(spin_trylock(&tree->lock)); - em = tree->last; - if (em && end > em->start && start < extent_map_end(em)) - goto found; - rb_node = __tree_search(&tree->map, start, &prev, &next); if (!rb_node && prev) { em = rb_entry(prev, struct extent_map, rb_node); @@ -305,7 +299,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, found: atomic_inc(&em->refs); - tree->last = em; out: return em; } @@ -327,8 +320,6 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) BUG_ON(spin_trylock(&tree->lock)); rb_erase(&em->rb_node, &tree->map); em->in_tree = 0; - if (tree->last == em) - tree->last = NULL; return ret; } EXPORT_SYMBOL(remove_extent_mapping); diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index a3978ec2784..26ac6fe0b26 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -26,7 +26,6 @@ struct extent_map { struct extent_map_tree { struct rb_root map; - struct extent_map *last; spinlock_t lock; }; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 591a30208ac..e5ffb66ad32 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -381,14 +381,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) break; } if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) { - start = em->start + em->len; - free_extent_map(em); - spin_unlock(&em_tree->lock); - if (start < end) { - len = end - start + 1; - continue; - } - break; + printk(KERN_CRIT "inode %lu trying to drop pinned " + "extent start %llu end %llu, em [%llu %llu]\n", + inode->i_ino, + (unsigned long long)start, + (unsigned long long)end, + (unsigned long long)em->start, + (unsigned long long)em->len); } remove_extent_mapping(em_tree, em); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 60852ada658..3da12a4d913 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -485,7 +485,7 @@ int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) fixup = kzalloc(sizeof(*fixup), GFP_NOFS); if (!fixup) return -EAGAIN; -printk("queueing worker to fixup page %lu %Lu\n", inode->i_ino, page_offset(page)); + SetPageChecked(page); page_cache_get(page); fixup->work.func = btrfs_writepage_fixup_worker; @@ -502,11 +502,13 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; + 
struct extent_map *em_orig; u64 alloc_hint = 0; u64 clear_start; u64 clear_end; struct list_head list; struct btrfs_key ins; + struct rb_node *rb; int ret; ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); @@ -535,6 +537,22 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) mutex_lock(&BTRFS_I(inode)->extent_mutex); + spin_lock(&em_tree->lock); + clear_start = ordered_extent->file_offset; + clear_end = ordered_extent->file_offset + ordered_extent->len; + em = lookup_extent_mapping(em_tree, clear_start, + ordered_extent->len); + em_orig = em; + while(em && clear_start < extent_map_end(em) && clear_end > em->start) { + clear_bit(EXTENT_FLAG_PINNED, &em->flags); + rb = rb_next(&em->rb_node); + if (!rb) + break; + em = rb_entry(rb, struct extent_map, rb_node); + } + free_extent_map(em_orig); + spin_unlock(&em_tree->lock); + ret = btrfs_drop_extents(trans, root, inode, ordered_extent->file_offset, ordered_extent->file_offset + @@ -548,22 +566,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->len, 0); BUG_ON(ret); - spin_lock(&em_tree->lock); - clear_start = ordered_extent->file_offset; - clear_end = ordered_extent->file_offset + ordered_extent->len; - while(clear_start < clear_end) { - em = lookup_extent_mapping(em_tree, clear_start, - clear_end - clear_start); - if (em) { - clear_bit(EXTENT_FLAG_PINNED, &em->flags); - clear_start = em->start + em->len; - free_extent_map(em); - } else { - break; - } - } - spin_unlock(&em_tree->lock); - btrfs_drop_extent_cache(inode, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1); @@ -2318,7 +2320,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, u64 extent_end = 0; u64 objectid = inode->i_ino; u32 found_type; - struct btrfs_path *path; + struct btrfs_path *path = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_file_extent_item *item; struct extent_buffer *leaf; @@ -2328,9 +2330,6 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_trans_handle *trans = NULL; - path = btrfs_alloc_path(); - BUG_ON(!path); - again: spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); @@ -2354,6 +2353,12 @@ again: em->bdev = root->fs_info->fs_devices->latest_bdev; em->start = EXTENT_MAP_HOLE; em->len = (u64)-1; + + if (!path) { + path = btrfs_alloc_path(); + BUG_ON(!path); + } + ret = btrfs_lookup_file_extent(trans, root, path, objectid, start, trans != NULL); if (ret < 0) { @@ -2530,7 +2535,8 @@ insert: } spin_unlock(&em_tree->lock); out: - btrfs_free_path(path); + if (path) + btrfs_free_path(path); if (trans) { ret = btrfs_end_transaction(trans, root); if (!err) { @@ -2643,8 +2649,8 @@ static int btrfs_writepage(struct page *page, struct writeback_control *wbc) return extent_write_full_page(tree, page, btrfs_get_extent, wbc); } -static int btrfs_writepages(struct address_space *mapping, - struct writeback_control *wbc) +int btrfs_writepages(struct address_space *mapping, + struct writeback_control *wbc) { struct extent_io_tree *tree; tree = &BTRFS_I(mapping->host)->io_tree; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 0d87795fdd8..830dbaea685 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include "ctree.h" #include "transaction.h" #include "btrfs_inode.h" @@ -307,12 +309,7 
@@ void btrfs_start_ordered_extent(struct inode *inode, * start IO on any dirty ones so the wait doesn't stall waiting * for pdflush to find them */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) - do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); -#else - do_sync_mapping_range(inode->i_mapping, start, end, - SYNC_FILE_RANGE_WRITE); -#endif + btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE); if (wait) wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); @@ -327,28 +324,26 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) u64 orig_end; u64 wait_end; struct btrfs_ordered_extent *ordered; - u64 mask = BTRFS_I(inode)->root->sectorsize - 1; if (start + len < start) { - wait_end = (inode->i_size + mask) & ~mask; - orig_end = (u64)-1; + orig_end = INT_LIMIT(loff_t); } else { orig_end = start + len - 1; - wait_end = orig_end; + if (orig_end > INT_LIMIT(loff_t)) + orig_end = INT_LIMIT(loff_t); } + wait_end = orig_end; again: /* start IO across the range first to instantiate any delalloc * extents */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) - do_sync_file_range(file, start, wait_end, SYNC_FILE_RANGE_WRITE); -#else - do_sync_mapping_range(inode->i_mapping, start, wait_end, - SYNC_FILE_RANGE_WRITE); -#endif - end = orig_end; - wait_on_extent_writeback(&BTRFS_I(inode)->io_tree, start, orig_end); + btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE); + + btrfs_wait_on_page_writeback_range(inode->i_mapping, + start >> PAGE_CACHE_SHIFT, + orig_end >> PAGE_CACHE_SHIFT); + end = orig_end; while(1) { ordered = btrfs_lookup_first_ordered_extent(inode, end); if (!ordered) { @@ -565,3 +560,87 @@ out: return ret; } + +/** + * taken from mm/filemap.c because it isn't exported + * + * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range + * @mapping: address space structure to write + * @start: offset in bytes where the range starts + * @end: offset in bytes where the range ends (inclusive) + * @sync_mode: enable synchronous operation + * + * Start writeback against all of a mapping's dirty pages that lie + * within the byte offsets inclusive. + * + * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as + * opposed to a regular memory cleansing writeback. The difference between + * these two operations is that if a dirty page/buffer is encountered, it must + * be waited upon, and not just skipped over. 
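The range clamping at the top of btrfs_wait_ordered_range above is worth restating on its own: the interface takes a u64 start/len pair, but the page cache works in loff_t, so both u64 wraparound and loff_t overflow have to be clamped. A minimal sketch of that computation, with a hypothetical helper name (INT_LIMIT() is the stock kernel macro the diff uses):

	/* sketch: inclusive end offset for a wait over [start, start + len) */
	static loff_t ordered_wait_end(u64 start, u64 len)
	{
		/* u64 wraparound means the caller asked for "everything to EOF" */
		if (start + len < start)
			return INT_LIMIT(loff_t);
		/* clamp so the page cache never sees an offset beyond loff_t */
		if (start + len - 1 > (u64)INT_LIMIT(loff_t))
			return INT_LIMIT(loff_t);
		return start + len - 1;
	}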
+ */ +int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, + loff_t end, int sync_mode) +{ + struct writeback_control wbc = { + .sync_mode = sync_mode, + .nr_to_write = mapping->nrpages * 2, + .range_start = start, + .range_end = end, + .for_writepages = 1, + }; + return btrfs_writepages(mapping, &wbc); +} + +/** + * taken from mm/filemap.c because it isn't exported + * + * wait_on_page_writeback_range - wait for writeback to complete + * @mapping: target address_space + * @start: beginning page index + * @end: ending page index + * + * Wait for writeback to complete against pages indexed by start->end + * inclusive + */ +int btrfs_wait_on_page_writeback_range(struct address_space *mapping, + pgoff_t start, pgoff_t end) +{ + struct pagevec pvec; + int nr_pages; + int ret = 0; + pgoff_t index; + + if (end < start) + return 0; + + pagevec_init(&pvec, 0); + index = start; + while ((index <= end) && + (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, + PAGECACHE_TAG_WRITEBACK, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) { + unsigned i; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + /* until radix tree lookup accepts end_index */ + if (page->index > end) + continue; + + wait_on_page_writeback(page); + if (PageError(page)) + ret = -EIO; + } + pagevec_release(&pvec); + cond_resched(); + } + + /* Check for outstanding write errors */ + if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) + ret = -ENOSPC; + if (test_and_clear_bit(AS_EIO, &mapping->flags)) + ret = -EIO; + + return ret; +} diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 1794efd13ca..8e8e3c0404f 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -132,4 +132,8 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); int btrfs_ordered_update_i_size(struct inode *inode, struct btrfs_ordered_extent *ordered); int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum); +int btrfs_wait_on_page_writeback_range(struct address_space *mapping, + pgoff_t start, pgoff_t end); +int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, + loff_t end, int sync_mode); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 05823904ecb..38c75a0256c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -649,7 +649,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, extent_io_tree_init(pinned_copy, root->fs_info->btree_inode->i_mapping, GFP_NOFS); -printk("commit trans %Lu\n", trans->transid); trans->transaction->in_commit = 1; trans->transaction->blocked = 1; cur_trans = trans->transaction; @@ -745,7 +744,6 @@ printk("commit trans %Lu\n", trans->transid); list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots); mutex_unlock(&root->fs_info->trans_mutex); -printk("done commit trans %Lu\n", trans->transid); kmem_cache_free(btrfs_trans_handle_cachep, trans); if (root->fs_info->closing) { -- cgit v1.2.3-70-g09d2 From ed98b56a6393c5e150fd5095b9eb7fd7d3cfb041 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 22 Jul 2008 23:06:42 -0400 Subject: Btrfs: Take the csum mutex while reading checksums Signed-off-by: Chris Mason --- fs/btrfs/file-item.c | 8 +++++--- fs/btrfs/inode.c | 2 ++ fs/btrfs/ordered-data.c | 2 +- fs/btrfs/ordered-data.h | 2 +- fs/btrfs/transaction.c | 3 +++ 5 files changed, 12 insertions(+), 5 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 45127e4797c..afe42d00b5a 100644 
--- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -152,7 +152,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, if (!sums) return -ENOMEM; - sector_sum = &sums->sums; + sector_sum = sums->sums; sums->file_offset = page_offset(bvec->bv_page) + bvec->bv_offset; sums->len = bio->bi_size; INIT_LIST_HEAD(&sums->list); @@ -174,7 +174,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), GFP_NOFS); BUG_ON(!sums); - sector_sum = &sums->sums; + sector_sum = sums->sums; sums->len = bytes_left; sums->file_offset = offset; ordered = btrfs_lookup_ordered_extent(inode, @@ -193,12 +193,14 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, (char *)§or_sum->sum); sector_sum->offset = page_offset(bvec->bv_page) + bvec->bv_offset; + sector_sum++; bio_index++; total_bytes += bvec->bv_len; this_sum_bytes += bvec->bv_len; bvec++; } + this_sum_bytes = 0; btrfs_add_ordered_sum(inode, ordered, sums); btrfs_put_ordered_extent(ordered); return 0; @@ -231,7 +233,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); BUG_ON(!path); - sector_sum = &sums->sums; + sector_sum = sums->sums; again: next_offset = (u64)-1; found_next = 0; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3da12a4d913..28e667052ec 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -612,6 +612,7 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) return 0; path = btrfs_alloc_path(); + mutex_lock(&BTRFS_I(inode)->csum_mutex); item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0); if (IS_ERR(item)) { /* @@ -640,6 +641,7 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) found: set_state_private(io_tree, start, csum); out: + mutex_unlock(&BTRFS_I(inode)->csum_mutex); if (path) btrfs_free_path(path); return ret; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 830dbaea685..b695f5b29c4 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -545,7 +545,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list); if (offset >= ordered_sum->file_offset) { num_sectors = ordered_sum->len / sectorsize; - sector_sums = &ordered_sum->sums; + sector_sums = ordered_sum->sums; for (i = 0; i < num_sectors; i++) { if (sector_sums[i].offset == offset) { *sum = sector_sums[i].sum; diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 8e8e3c0404f..36e63f1f79b 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -46,7 +46,7 @@ struct btrfs_ordered_sum { unsigned long len; struct list_head list; /* last field is a variable length array of btrfs_sector_sums */ - struct btrfs_sector_sum sums; + struct btrfs_sector_sum sums[]; }; /* diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 38c75a0256c..0f756e0175c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -382,6 +382,9 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, memcpy(dirty->root, root, sizeof(*root)); dirty->root->node = root->commit_root; dirty->latest_root = root; + spin_lock_init(&dirty->root->node_lock); + mutex_init(&dirty->root->objectid_mutex); + root->commit_root = NULL; root->root_key.offset = root->fs_info->generation; -- cgit v1.2.3-70-g09d2 From 89642229a582a5c2b6d2ed8ec16986387d9a9047 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 24 Jul 2008 09:41:53 -0400 Subject: Btrfs: 
Search data ordered extents first for checksums on read Checksum items are not inserted into the tree until all of the io from a given extent is complete. This means one dirty page from an extent may be written, freed, and then read again before the entire extent is on disk and the checksum item is inserted. The checksums themselves are stored in the ordered extent so they can be inserted in bulk when IO is complete. On read, if a checksum item isn't found, the ordered extents were being searched for a checksum record. This all worked most of the time, but the checksum insertion code tries to reduce the number of tree operations by pre-inserting checksum items based on i_size and a few other factors. This means the read code might find a checksum item that hasn't yet really been filled in. This commit changes things to check the ordered extents first and only dive into the btree if nothing was found. This removes the need for extra locking and is more reliable. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 9 ++++++++- fs/btrfs/extent_io.c | 8 ++++---- fs/btrfs/inode.c | 33 ++++++++++++++++++--------------- fs/btrfs/ordered-data.c | 1 + 4 files changed, 31 insertions(+), 20 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 86e84a8579e..7ce3f83c5dd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1011,9 +1011,16 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); spin_unlock(&em_tree->lock); - if (!em) + if (!em) { + __unplug_io_fn(bdi, page); return; + } + if (em->block_start >= EXTENT_MAP_LAST_BYTE) { + free_extent_map(em); + __unplug_io_fn(bdi, page); + return; + } offset = offset - em->start; btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree, em->block_start + offset, page); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9965993748d..e3547a992d5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1949,18 +1949,18 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); cur + iosize - 1); } if (!ret) { - unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1; - nr -= page->index; + unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; + pnr -= page->index; ret = submit_extent_page(READ, tree, page, sector, iosize, page_offset, - bdev, bio, nr, + bdev, bio, pnr, end_bio_extent_readpage, mirror_num); + nr++; } if (ret) SetPageError(page); cur = cur + iosize; page_offset += iosize; - nr++; } if (!nr) { if (!PageError(page)) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 28e667052ec..0e90315ea80 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -611,22 +611,25 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) btrfs_test_flag(inode, NODATASUM)) return 0; + /* + * It is possible there is an ordered extent that has + * not yet finished for this range in the file. If so, + * that extent will have a csum cached, and it will insert + * the sum after all the blocks in the extent are fully + * on disk. So, look for an ordered extent and use the + * sum if found. We have to do this before looking in the + * btree because csum items are pre-inserted based on + * the file size. btrfs_lookup_csum might find an item + * that still hasn't been fully filled. 
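In other words, the read path becomes a two-step lookup. The sketch below distills it; csum_from_btree() is a hypothetical stand-in for the btrfs_lookup_csum() path that the rest of this hunk implements, and btrfs_find_ordered_sum() returns 0 when it fills in *csum, as the hunk below relies on:

	/* illustrative only: consult in-flight ordered extents before the btree */
	static int csum_for_read(struct inode *inode, u64 start, u32 *csum)
	{
		/* a still-running ordered extent holds the authoritative sum */
		if (btrfs_find_ordered_sum(inode, start, csum) == 0)
			return 0;
		/* nothing in flight: any csum item found in the btree is complete */
		return csum_from_btree(inode, start, csum); /* hypothetical helper */
	}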
+ */ + ret = btrfs_find_ordered_sum(inode, start, &csum); + if (ret == 0) + goto found; + + ret = 0; path = btrfs_alloc_path(); - mutex_lock(&BTRFS_I(inode)->csum_mutex); item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0); if (IS_ERR(item)) { - /* - * It is possible there is an ordered extent that has - * not yet finished for this range in the file. If so, - * that extent will have a csum cached, and it will insert - * the sum after all the blocks in the extent are fully - * on disk. So, look for an ordered extent and use the - * sum if found. - */ - ret = btrfs_find_ordered_sum(inode, start, &csum); - if (ret == 0) - goto found; - ret = PTR_ERR(item); /* a csum that isn't present is a preallocated region. */ if (ret == -ENOENT || ret == -EFBIG) @@ -641,7 +644,6 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) found: set_state_private(io_tree, start, csum); out: - mutex_unlock(&BTRFS_I(inode)->csum_mutex); if (path) btrfs_free_path(path); return ret; @@ -1375,7 +1377,7 @@ again: } if (!PageUptodate(page)) { ret = -EIO; - goto out; + goto out_unlock; } } wait_on_page_writeback(page); @@ -1406,6 +1408,7 @@ again: set_page_dirty(page); unlock_extent(io_tree, page_start, page_end, GFP_NOFS); +out_unlock: unlock_page(page); page_cache_release(page); out: diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index b695f5b29c4..e42fd233e04 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -557,6 +557,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) } out: mutex_unlock(&tree->mutex); + btrfs_put_ordered_extent(ordered); return ret; } -- cgit v1.2.3-70-g09d2 From 3eaa2885276fd6dac7b076a793932428b7168e74 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 24 Jul 2008 11:57:52 -0400 Subject: Btrfs: Fix the defragmentation code and the block relocation code for data=ordered Before setting an extent to delalloc, the code needs to wait for pending ordered extents. Also, the relocation code needs to wait for ordered IO before scanning the block group again. This is because the extents are not removed until the IO for the new extents is finished. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 7 +++++++ fs/btrfs/disk-io.c | 3 +++ fs/btrfs/extent-tree.c | 39 ++++++++++++++++++++++++---------- fs/btrfs/ioctl.c | 21 ++++++++++++------- fs/btrfs/ordered-data.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++-- fs/btrfs/ordered-data.h | 7 +++++++ 6 files changed, 113 insertions(+), 20 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8ecac2e77a4..6675e916ebc 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -538,6 +538,13 @@ struct btrfs_fs_info { struct list_head dead_roots; atomic_t nr_async_submits; + /* + * this is used by the balancing code to wait for all the pending + * ordered extents + */ + spinlock_t ordered_extent_lock; + struct list_head ordered_extents; + /* * there is a pool of worker threads for checksumming during writes * and a pool for checksumming after reads.
This is because readers diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7ce3f83c5dd..ec01062eb41 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1252,6 +1252,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->btree_inode->i_nlink = 1; fs_info->thread_pool_size = min(num_online_cpus() + 2, 8); + INIT_LIST_HEAD(&fs_info->ordered_extents); + spin_lock_init(&fs_info->ordered_extent_lock); + sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index febc6295c7a..f92b297e7da 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2640,6 +2640,7 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start, struct file_ra_state *ra; unsigned long total_read = 0; unsigned long ra_pages; + struct btrfs_ordered_extent *ordered; struct btrfs_trans_handle *trans; ra = kzalloc(sizeof(*ra), GFP_NOFS); @@ -2658,9 +2659,9 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start, calc_ra(i, last_index, ra_pages)); } total_read++; - if (((u64)i << PAGE_CACHE_SHIFT) > inode->i_size) +again: + if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode)) goto truncate_racing; - page = grab_cache_page(inode->i_mapping, i); if (!page) { goto out_unlock; @@ -2674,18 +2675,24 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start, goto out_unlock; } } -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) - ClearPageDirty(page); -#else - cancel_dirty_page(page, PAGE_CACHE_SIZE); -#endif wait_on_page_writeback(page); - set_page_extent_mapped(page); + page_start = (u64)page->index << PAGE_CACHE_SHIFT; page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(io_tree, page_start, page_end, GFP_NOFS); + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + goto again; + } + set_page_extent_mapped(page); + + set_extent_delalloc(io_tree, page_start, page_end, GFP_NOFS); set_page_dirty(page); @@ -2694,10 +2701,18 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start, unlock_page(page); page_cache_release(page); } - balance_dirty_pages_ratelimited_nr(inode->i_mapping, - total_read); out_unlock: + /* we have to start the IO in order to get the ordered extents + * instantiated. This allows the relocation code to wait + * for all the ordered extents to hit the disk.
+ * + * Otherwise, it would constantly loop over the same extents + * because the old ones don't get deleted until the IO is + * started + */ + btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1, + WB_SYNC_NONE); kfree(ra); trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); if (trans) { @@ -3238,6 +3253,8 @@ next: btrfs_clean_old_snapshots(tree_root); + btrfs_wait_ordered_extents(tree_root); + trans = btrfs_start_transaction(tree_root, 1); btrfs_commit_transaction(trans, tree_root); mutex_lock(&root->fs_info->alloc_mutex); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 83f17a5cbd6..a61f2e7e2db 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -213,6 +213,7 @@ int btrfs_defrag_file(struct file *file) struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_ordered_extent *ordered; struct page *page; unsigned long last_index; unsigned long ra_pages = root->fs_info->bdi.ra_pages; @@ -234,6 +235,7 @@ int btrfs_defrag_file(struct file *file) min(last_index, i + ra_pages - 1)); } total_read++; +again: page = grab_cache_page(inode->i_mapping, i); if (!page) goto out_unlock; @@ -247,18 +249,23 @@ int btrfs_defrag_file(struct file *file) } } -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) - ClearPageDirty(page); -#else - cancel_dirty_page(page, PAGE_CACHE_SIZE); -#endif wait_on_page_writeback(page); - set_page_extent_mapped(page); page_start = (u64)page->index << PAGE_CACHE_SHIFT; page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(io_tree, page_start, page_end, GFP_NOFS); + + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + goto again; + } + set_page_extent_mapped(page); + set_extent_delalloc(io_tree, page_start, page_end, GFP_NOFS); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index e42fd233e04..676e4bd65c5 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -167,20 +167,28 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->file_offset = file_offset; entry->start = start; entry->len = len; + entry->inode = inode; + /* one ref for the tree */ atomic_set(&entry->refs, 1); init_waitqueue_head(&entry->wait); INIT_LIST_HEAD(&entry->list); + INIT_LIST_HEAD(&entry->root_extent_list); node = tree_insert(&tree->tree, file_offset, &entry->rb_node); if (node) { - entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); - atomic_inc(&entry->refs); + printk("warning dup entry from add_ordered_extent\n"); + BUG(); } set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset, entry_end(entry) - 1, GFP_NOFS); + spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); + list_add_tail(&entry->root_extent_list, + &BTRFS_I(inode)->root->fs_info->ordered_extents); + spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); + mutex_unlock(&tree->mutex); BUG_ON(node); return 0; @@ -285,11 +293,55 @@ int btrfs_remove_ordered_extent(struct inode *inode, rb_erase(node, &tree->tree); tree->last = NULL; set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); + + spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); + list_del_init(&entry->root_extent_list); + spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); + mutex_unlock(&tree->mutex); wake_up(&entry->wait); 
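The relocation and defrag hunks above share one retry discipline, condensed below as a standalone helper (the wrapper itself is hypothetical; the calls inside it are exactly the ones the diffs use). The point is that the extent lock must be dropped before waiting, because the completion path, btrfs_finish_ordered_io(), takes the same lock:

	/* condensed sketch of the retry discipline used by defrag and relocation */
	static void pin_page_delalloc(struct inode *inode, struct page *page,
				      u64 page_start, u64 page_end)
	{
		struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
		struct btrfs_ordered_extent *ordered;

	again:
		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
		ordered = btrfs_lookup_ordered_extent(inode, page_start);
		if (ordered) {
			/* drop the extent lock before waiting: completion takes it too */
			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
			btrfs_start_ordered_extent(inode, ordered, 1); /* 1 == wait */
			btrfs_put_ordered_extent(ordered);
			goto again;
		}
		set_page_extent_mapped(page);
		set_extent_delalloc(io_tree, page_start, page_end, GFP_NOFS);
		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
	}

In the real hunks the page lock and page reference are dropped and re-acquired as well; the sketch keeps only the extent-lock dance.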
return 0; } +int btrfs_wait_ordered_extents(struct btrfs_root *root) +{ + struct list_head splice; + struct list_head *cur; + struct btrfs_ordered_extent *ordered; + struct inode *inode; + + INIT_LIST_HEAD(&splice); + + spin_lock(&root->fs_info->ordered_extent_lock); + list_splice_init(&root->fs_info->ordered_extents, &splice); + while(!list_empty(&splice)) { + cur = splice.next; + ordered = list_entry(cur, struct btrfs_ordered_extent, + root_extent_list); + list_del_init(&ordered->root_extent_list); + atomic_inc(&ordered->refs); + inode = ordered->inode; + + /* + * the inode can't go away until all the pages are gone + * and the pages won't go away while there is still + * an ordered extent and the ordered extent won't go + * away until it is off this list. So, we can safely + * increment i_count here and call iput later + */ + atomic_inc(&inode->i_count); + spin_unlock(&root->fs_info->ordered_extent_lock); + + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + iput(inode); + + spin_lock(&root->fs_info->ordered_extent_lock); + } + spin_unlock(&root->fs_info->ordered_extent_lock); + return 0; +} + /* * Used to start IO or wait for a given ordered extent to finish. * diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 199cb0b4f1d..5efe6b63c74 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -80,6 +80,9 @@ struct btrfs_ordered_extent { /* reference count */ atomic_t refs; + /* the inode we belong to */ + struct inode *inode; + /* list of checksums for insertion when the extent io is done */ struct list_head list; @@ -88,6 +91,9 @@ struct btrfs_ordered_extent { /* our friendly rbtree entry */ struct rb_node rb_node; + + /* a per root list of all the pending ordered extents */ + struct list_head root_extent_list; }; @@ -137,4 +143,5 @@ int btrfs_wait_on_page_writeback_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); +int btrfs_wait_ordered_extents(struct btrfs_root *root); #endif -- cgit v1.2.3-70-g09d2 From b48652c101cce7a54379a49cc0cf854cec2c94e2 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Mon, 4 Aug 2008 23:23:47 -0400 Subject: Btrfs: Various small fixes. This trivial patch contains two locking fixes and an off-by-one fix. --- Signed-off-by: Chris Mason --- fs/btrfs/ordered-data.c | 4 ++-- fs/btrfs/root-tree.c | 3 +-- fs/btrfs/super.c | 2 ++ fs/btrfs/transaction.c | 9 +++++---- fs/btrfs/transaction.h | 3 +-- 5 files changed, 11 insertions(+), 10 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 676e4bd65c5..db200e6baf7 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -548,7 +548,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, */ test = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (test->file_offset > entry_end(ordered)) { - i_size_test = test->file_offset - 1; + i_size_test = test->file_offset; } } else { i_size_test = i_size_read(inode); @@ -561,7 +561,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, * disk_i_size to the end of the region.
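Both one-line fixes to btrfs_ordered_update_i_size in this patch enforce a single convention: i_size_test is the exclusive end of the region being probed, while test_range_bit() takes an inclusive end offset. Restated with the names from the hunk that follows:

	/* the next ordered extent starts at file_offset, an exclusive end */
	i_size_test = test->file_offset;
	/* half-open [entry_end, i_size_test) => inclusive end i_size_test - 1 */
	if (i_size_test > entry_end(ordered) &&
	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
			    EXTENT_DELALLOC, 0)) {
		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
	}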
*/ if (i_size_test > entry_end(ordered) && - !test_range_bit(io_tree, entry_end(ordered), i_size_test, + !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1, EXTENT_DELALLOC, 0)) { new_i_size = min_t(u64, i_size_test, i_size_read(inode)); } diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index a5c0e98b5ae..36726696e58 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -209,8 +209,7 @@ again: goto err; } - ret = btrfs_add_dead_root(dead_root, latest, - &root->fs_info->dead_roots); + ret = btrfs_add_dead_root(dead_root, latest); if (ret) goto err; goto again; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a6a418b6894..eb4b357d05e 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -449,7 +449,9 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, s->s_flags |= MS_ACTIVE; } + mutex_lock(&s->s_root->d_inode->i_mutex); root = lookup_one_len(subvol_name, s->s_root, strlen(subvol_name)); + mutex_unlock(&s->s_root->d_inode->i_mutex); if (IS_ERR(root)) { up_write(&s->s_umount); deactivate_super(s); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index a2c821e3c3a..ebf5362da1d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -389,9 +389,7 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, return 0; } -int btrfs_add_dead_root(struct btrfs_root *root, - struct btrfs_root *latest, - struct list_head *dead_list) +int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest) { struct btrfs_dirty_root *dirty; @@ -400,7 +398,10 @@ int btrfs_add_dead_root(struct btrfs_root *root, return -ENOMEM; dirty->root = root; dirty->latest_root = latest; - list_add(&dirty->list, dead_list); + + mutex_lock(&root->fs_info->trans_mutex); + list_add(&dirty->list, &latest->fs_info->dead_roots); + mutex_unlock(&root->fs_info->trans_mutex); return 0; } diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 2c73caeebb2..598baa31241 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -90,8 +90,7 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, struct btrfs_root *root); -int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest, - struct list_head *dead_list); +int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest); int btrfs_defrag_root(struct btrfs_root *root, int cacheonly); int btrfs_clean_old_snapshots(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans, -- cgit v1.2.3-70-g09d2 From 7ea394f1192bee1af67ea4762c88ef4b7b0487a8 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Tue, 5 Aug 2008 13:05:02 -0400 Subject: Btrfs: Fix nodatacow for the new data=ordered mode Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 3 ++- fs/btrfs/extent-tree.c | 11 +++++---- fs/btrfs/inode.c | 60 +++++++++++++++++++++++++++++++------------------ fs/btrfs/ioctl.c | 1 + fs/btrfs/ordered-data.c | 16 ++++++++++--- fs/btrfs/ordered-data.h | 6 +++-- fs/btrfs/transaction.c | 11 +++++++++ 7 files changed, 74 insertions(+), 34 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 116aee21bf7..f90e5a7ac16 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1403,7 +1403,8 @@ static inline struct dentry *fdentry(struct file *file) { } /* extent-tree.c */ -int btrfs_cross_ref_exists(struct btrfs_root *root, +int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans, + struct btrfs_root *root, 
struct btrfs_key *key, u64 bytenr); int btrfs_extent_post_op(struct btrfs_trans_handle *trans, struct btrfs_root *root); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 33cb2ac4cb2..fff219ed61d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -893,10 +893,10 @@ out: return ret; } -int btrfs_cross_ref_exists(struct btrfs_root *root, +int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_key *key, u64 bytenr) { - struct btrfs_trans_handle *trans; struct btrfs_root *old_root; struct btrfs_path *path = NULL; struct extent_buffer *eb; @@ -908,6 +908,7 @@ int btrfs_cross_ref_exists(struct btrfs_root *root, int level; int ret; + BUG_ON(trans == NULL); BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY); ret = get_reference_status(root, bytenr, 0, key->objectid, &min_generation, &ref_count); @@ -917,7 +918,6 @@ int btrfs_cross_ref_exists(struct btrfs_root *root, if (ref_count != 1) return 1; - trans = btrfs_start_transaction(root, 0); old_root = root->dirty_root->root; ref_generation = old_root->root_key.offset; @@ -973,7 +973,6 @@ int btrfs_cross_ref_exists(struct btrfs_root *root, out: if (path) btrfs_free_path(path); - btrfs_end_transaction(trans, root); return ret; } @@ -3320,7 +3319,7 @@ again: mutex_unlock(&root->fs_info->alloc_mutex); btrfs_start_delalloc_inodes(root); - btrfs_wait_ordered_extents(tree_root); + btrfs_wait_ordered_extents(tree_root, 0); mutex_lock(&root->fs_info->alloc_mutex); @@ -3407,7 +3406,7 @@ next: btrfs_clean_old_snapshots(tree_root); btrfs_start_delalloc_inodes(root); - btrfs_wait_ordered_extents(tree_root); + btrfs_wait_ordered_extents(tree_root, 0); trans = btrfs_start_transaction(tree_root, 1); btrfs_commit_transaction(trans, tree_root); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4d8ffc01931..c33053ba381 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -166,7 +166,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) cur_alloc_size = ins.offset; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, - ins.offset); + ins.offset, 0); BUG_ON(ret); if (num_bytes < cur_alloc_size) { printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes, @@ -187,31 +187,32 @@ static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end) u64 extent_start; u64 extent_end; u64 bytenr; - u64 cow_end; u64 loops = 0; u64 total_fs_bytes; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_block_group_cache *block_group; + struct btrfs_trans_handle *trans; struct extent_buffer *leaf; int found_type; struct btrfs_path *path; struct btrfs_file_extent_item *item; int ret; - int err; + int err = 0; struct btrfs_key found_key; total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); path = btrfs_alloc_path(); BUG_ON(!path); + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); again: ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino, start, 0); if (ret < 0) { - btrfs_free_path(path); - return ret; + err = ret; + goto out; } - cow_end = end; if (ret != 0) { if (path->slots[0] == 0) goto not_found; @@ -244,12 +245,11 @@ again: if (start < extent_start || start >= extent_end) goto not_found; - cow_end = min(end, extent_end - 1); bytenr = btrfs_file_extent_disk_bytenr(leaf, item); if (bytenr == 0) goto not_found; - if (btrfs_cross_ref_exists(root, &found_key, bytenr)) + if (btrfs_cross_ref_exists(trans, root, &found_key, bytenr)) goto not_found; /* * we may be called by the resizer, make sure we're inside @@ -260,24 +260,32 @@ again: if 
(!block_group || block_group->ro) goto not_found; + bytenr += btrfs_file_extent_offset(leaf, item); + extent_num_bytes = min(end + 1, extent_end) - start; + ret = btrfs_add_ordered_extent(inode, start, bytenr, + extent_num_bytes, 1); + if (ret) { + err = ret; + goto out; + } + + btrfs_release_path(root, path); start = extent_end; + if (start <= end) { + loops++; + goto again; + } } else { - goto not_found; - } -loop: - if (start > end) { +not_found: + btrfs_end_transaction(trans, root); btrfs_free_path(path); - return 0; + return cow_file_range(inode, start, end); } - btrfs_release_path(root, path); - loops++; - goto again; - -not_found: - btrfs_release_path(root, path); - cow_file_range(inode, start, end); - start = end + 1; - goto loop; +out: + WARN_ON(err); + btrfs_end_transaction(trans, root); + btrfs_free_path(path); + return err; } static int run_delalloc_range(struct inode *inode, u64 start, u64 end) @@ -385,6 +393,11 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, goto mapit; } + if (btrfs_test_opt(root, NODATASUM) || + btrfs_test_flag(inode, NODATASUM)) { + goto mapit; + } + return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, __btrfs_submit_bio_hook); @@ -527,6 +540,8 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent = btrfs_lookup_ordered_extent(inode, start); BUG_ON(!ordered_extent); + if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) + goto nocow; lock_extent(io_tree, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, @@ -567,6 +582,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) unlock_extent(io_tree, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, GFP_NOFS); +nocow: add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 3932c7cd0fa..59b64c738fd 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index db200e6baf7..da6d43eb41d 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -152,7 +152,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, * inserted. 
*/ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len) + u64 start, u64 len, int nocow) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; @@ -168,6 +168,8 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->start = start; entry->len = len; entry->inode = inode; + if (nocow) + set_bit(BTRFS_ORDERED_NOCOW, &entry->flags); /* one ref for the tree */ atomic_set(&entry->refs, 1); @@ -303,10 +305,11 @@ int btrfs_remove_ordered_extent(struct inode *inode, return 0; } -int btrfs_wait_ordered_extents(struct btrfs_root *root) +int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) { struct list_head splice; struct list_head *cur; + struct list_head *tmp; struct btrfs_ordered_extent *ordered; struct inode *inode; @@ -314,10 +317,16 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root) spin_lock(&root->fs_info->ordered_extent_lock); list_splice_init(&root->fs_info->ordered_extents, &splice); - while(!list_empty(&splice)) { + list_for_each_safe(cur, tmp, &splice) { cur = splice.next; ordered = list_entry(cur, struct btrfs_ordered_extent, root_extent_list); + if (nocow_only && + !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { + cond_resched_lock(&root->fs_info->ordered_extent_lock); + continue; + } + list_del_init(&ordered->root_extent_list); atomic_inc(&ordered->refs); inode = ordered->inode; @@ -338,6 +347,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root) spin_lock(&root->fs_info->ordered_extent_lock); } + list_splice_init(&splice, &root->fs_info->ordered_extents); spin_unlock(&root->fs_info->ordered_extent_lock); return 0; } diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 5efe6b63c74..fd45519f30a 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -64,6 +64,8 @@ struct btrfs_ordered_sum { #define BTRFS_ORDERED_COMPLETE 1 /* set when removed from the tree */ +#define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */ + struct btrfs_ordered_extent { /* logical offset in the file */ u64 file_offset; @@ -125,7 +127,7 @@ int btrfs_remove_ordered_extent(struct inode *inode, int btrfs_dec_test_ordered_pending(struct inode *inode, u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len); + u64 start, u64 len, int nocow); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); @@ -143,5 +145,5 @@ int btrfs_wait_on_page_writeback_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); -int btrfs_wait_ordered_extents(struct btrfs_root *root); +int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index ebf5362da1d..9d3d08e9f8d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -438,6 +438,7 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, free_extent_buffer(root->commit_root); root->commit_root = NULL; + root->dirty_root = NULL; spin_lock(&root->list_lock); list_del_init(&dirty->root->dead_list); @@ -461,6 +462,7 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, sizeof(struct btrfs_disk_key)); root->root_item.drop_level = 0; root->commit_root = NULL; + root->dirty_root = NULL; root->root_key.offset = root->fs_info->generation; btrfs_set_root_bytenr(&root->root_item, 
root->node->start); @@ -762,7 +764,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, } do { + int snap_pending = 0; joined = cur_trans->num_joined; + if (!list_empty(&trans->transaction->pending_snapshots)) + snap_pending = 1; + WARN_ON(cur_trans != trans->transaction); prepare_to_wait(&cur_trans->writer_wait, &wait, TASK_UNINTERRUPTIBLE); @@ -774,6 +780,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_unlock(&root->fs_info->trans_mutex); + if (snap_pending) { + ret = btrfs_wait_ordered_extents(root, 1); + BUG_ON(ret); + } + schedule_timeout(timeout); mutex_lock(&root->fs_info->trans_mutex); -- cgit v1.2.3-70-g09d2 From 5b21f2ed3f2947b5195b65c9fdbdd9e52904cc03 Mon Sep 17 00:00:00 2001 From: Zheng Yan Date: Fri, 26 Sep 2008 10:05:38 -0400 Subject: Btrfs: extent_map and data=ordered fixes for space balancing * Add an EXTENT_BOUNDARY state bit to keep the writepage code from merging data extents that are in the process of being relocated. This allows us to do accounting for them properly. * The balancing code relocates data extents independent of the underlying inode. The extent_map code was modified to properly account for things moving around (invalidating extent_map caches in the inode). * Don't take the drop_mutex in the create_subvol ioctl. It isn't required. * Fix walking of the ordered extent list to avoid races with sys_unlink. * Change the lock ordering rules. Transaction start goes outside the drop_mutex. This allows btrfs_commit_transaction to directly drop the relocation trees. Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 9 +++++---- fs/btrfs/ctree.h | 11 ++++++++++- fs/btrfs/extent_io.c | 13 +++++++++---- fs/btrfs/extent_io.h | 1 + fs/btrfs/file.c | 31 +++++++++++++++++++++++------ fs/btrfs/inode-map.c | 4 ++++ fs/btrfs/inode.c | 52 +++++++++++++++++++++++++++++++++++-------------- fs/btrfs/ioctl.c | 2 -- fs/btrfs/ordered-data.c | 26 ++++++++++++------------- fs/btrfs/transaction.c | 8 ++++---- 10 files changed, 108 insertions(+), 49 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 50aea8cb653..f9cd40967d0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -290,7 +290,6 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, struct extent_buffer **cow_ret, u64 prealloc_dest) { u64 search_start; - u64 header_trans; int ret; if (trans->transaction != root->fs_info->running_transaction) { @@ -304,9 +303,9 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, WARN_ON(1); } - header_trans = btrfs_header_generation(buf); spin_lock(&root->fs_info->hash_lock); - if (header_trans == trans->transid && + if (btrfs_header_generation(buf) == trans->transid && + btrfs_header_owner(buf) == root->root_key.objectid && !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { *cow_ret = buf; spin_unlock(&root->fs_info->hash_lock); @@ -1300,6 +1299,7 @@ again: /* is a cow on this block not required */ spin_lock(&root->fs_info->hash_lock); if (btrfs_header_generation(b) == trans->transid && + btrfs_header_owner(b) == root->root_key.objectid && !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) { spin_unlock(&root->fs_info->hash_lock); goto cow_done; @@ -1396,7 +1396,8 @@ cow_done: /* this is only true while dropping a snapshot */ if (level == lowest_level) { - break; + ret = 0; + goto done; } blocknr = btrfs_node_blockptr(b, slot); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b9f9f815ed0..3e62a1b0a1f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1486,6
+1486,9 @@ static inline struct dentry *fdentry(struct file *file) /* extent-tree.c */ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); +int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, u32 *refs); int btrfs_update_pinned_extents(struct btrfs_root *root, u64 bytenr, u64 num, int pin); int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, @@ -1812,6 +1815,8 @@ void btrfs_destroy_inode(struct inode *inode); int btrfs_init_cachep(void); void btrfs_destroy_cachep(void); long btrfs_ioctl_trans_end(struct file *file); +struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, + struct btrfs_root *root, int wait); struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid, struct btrfs_root *root); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, @@ -1824,13 +1829,17 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); +int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); +int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); +void btrfs_orphan_cleanup(struct btrfs_root *root); /* ioctl.c */ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* file.c */ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync); -int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end); +int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, + int skip_pinned); int btrfs_check_file(struct btrfs_root *root, struct inode *inode); extern struct file_operations btrfs_file_operations; int btrfs_drop_extents(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e3a25be5c66..8bd1b402f3f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -292,7 +292,7 @@ static int merge_state(struct extent_io_tree *tree, struct extent_state *other; struct rb_node *other_node; - if (state->state & EXTENT_IOBITS) + if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) return 0; other_node = rb_prev(&state->rb_node); @@ -1070,7 +1070,8 @@ search_again: while(1) { state = rb_entry(node, struct extent_state, rb_node); - if (found && state->start != cur_start) { + if (found && (state->start != cur_start || + (state->state & EXTENT_BOUNDARY))) { goto out; } if (!(state->state & EXTENT_DELALLOC)) { @@ -1078,7 +1079,7 @@ search_again: *end = state->end; goto out; } - if (!found) { + if (!found && !(state->state & EXTENT_BOUNDARY)) { struct extent_state *prev_state; struct rb_node *prev_node = node; while(1) { @@ -1088,7 +1089,11 @@ search_again: prev_state = rb_entry(prev_node, struct extent_state, rb_node); - if (!(prev_state->state & EXTENT_DELALLOC)) + if ((prev_state->end + 1 != state->start) || + !(prev_state->state & EXTENT_DELALLOC)) + break; + if ((cur_start - prev_state->start) * 2 > + max_bytes) break; state = prev_state; node = prev_node; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 3cb411a5f4d..c9d1908a1ae 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -15,6 +15,7 @@ #define EXTENT_BUFFER_FILLED (1 << 8) #define EXTENT_ORDERED (1 << 9) #define EXTENT_ORDERED_METADATA (1 << 10) +#define EXTENT_BOUNDARY (1 << 11) #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) /* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 8856570a0eb..1b7e51a9db0 100644 --- 
a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -294,7 +294,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, last_pos_in_file, 0, 0, hole_size, 0); btrfs_drop_extent_cache(inode, last_pos_in_file, - last_pos_in_file + hole_size -1); + last_pos_in_file + hole_size - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); btrfs_check_file(root, inode); } @@ -337,7 +337,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, inline_size -= start_pos; err = insert_inline_extent(trans, root, inode, start_pos, inline_size, pages, 0, num_pages); - btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1); + btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1, 0); BUG_ON(err); mutex_unlock(&BTRFS_I(inode)->extent_mutex); @@ -362,7 +362,8 @@ out_unlock: return err; } -int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) +int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, + int skip_pinned) { struct extent_map *em; struct extent_map *split = NULL; @@ -371,6 +372,7 @@ int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) u64 len = end - start + 1; int ret; int testend = 1; + unsigned long flags; WARN_ON(end < start); if (end == (u64)-1) { @@ -389,6 +391,23 @@ int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) spin_unlock(&em_tree->lock); break; } + flags = em->flags; + if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { + spin_unlock(&em_tree->lock); + if (em->start <= start && + (!testend || em->start + em->len >= start + len)) { + free_extent_map(em); + break; + } + if (start < em->start) { + len = em->start - start; + } else { + len = start + len - (em->start + em->len); + start = em->start + em->len; + } + free_extent_map(em); + continue; + } clear_bit(EXTENT_FLAG_PINNED, &em->flags); remove_extent_mapping(em_tree, em); @@ -398,7 +417,7 @@ int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) split->len = start - em->start; split->block_start = em->block_start; split->bdev = em->bdev; - split->flags = em->flags; + split->flags = flags; ret = add_extent_mapping(em_tree, split); BUG_ON(ret); free_extent_map(split); @@ -412,7 +431,7 @@ int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) split->start = start + len; split->len = em->start + em->len - (start + len); split->bdev = em->bdev; - split->flags = em->flags; + split->flags = flags; split->block_start = em->block_start + diff; @@ -541,7 +560,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, int recow; int ret; - btrfs_drop_extent_cache(inode, start, end - 1); + btrfs_drop_extent_cache(inode, start, end - 1, 0); path = btrfs_alloc_path(); if (!path) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index cd6171c2da4..80038c5ef7c 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -117,10 +117,14 @@ int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, *objectid = last_ino; goto found; } + } else if (key.objectid > search_start) { + *objectid = search_start; + goto found; } } if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) break; + start_found = 1; last_ino = key.objectid + 1; path->slots[0]++; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 48a3dc03080..4516fbf0167 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -135,7 +135,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); 
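The skip_pinned arithmetic in btrfs_drop_extent_cache above is easy to misread, so here is the interval logic by itself, as an illustrative sketch on plain integers (hypothetical helper, not the kernel code; the testend special case is omitted). The drop range [start, start + len) is shrunk so it no longer overlaps the pinned mapping [em_start, em_end):

	static int skip_pinned_range(u64 em_start, u64 em_end, u64 *start, u64 *len)
	{
		/* pinned mapping covers the whole drop range: nothing left to drop */
		if (em_start <= *start && em_end >= *start + *len)
			return 0;
		if (*start < em_start) {
			/* keep only the piece in front of the pinned mapping */
			*len = em_start - *start;
		} else {
			/* keep only the piece behind it */
			*len = *start + *len - em_end;
			*start = em_end;
		}
		return 1;	/* caller continues with the shrunk range */
	}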
mutex_lock(&BTRFS_I(inode)->extent_mutex); - btrfs_drop_extent_cache(inode, start, start + num_bytes - 1); + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); while(num_bytes > 0) { @@ -163,7 +163,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) break; } btrfs_drop_extent_cache(inode, start, - start + ins.offset - 1); + start + ins.offset - 1, 0); } mutex_unlock(&BTRFS_I(inode)->extent_mutex); @@ -587,7 +587,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) btrfs_drop_extent_cache(inode, ordered_extent->file_offset, ordered_extent->file_offset + - ordered_extent->len - 1); + ordered_extent->len - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); ins.objectid = ordered_extent->start; @@ -880,7 +880,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) int ret = 0, nr_unlink = 0, nr_truncate = 0; /* don't do orphan cleanup if the fs is readonly. */ - if (root->inode->i_sb->s_flags & MS_RDONLY) + if (root->fs_info->sb->s_flags & MS_RDONLY) return; path = btrfs_alloc_path(); @@ -892,8 +892,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); key.offset = (u64)-1; - trans = btrfs_start_transaction(root, 1); - btrfs_set_trans_block_group(trans, root->inode); while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -933,7 +931,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) * crossing root thing. we store the inode number in the * offset of the orphan item. */ - inode = btrfs_iget_locked(root->inode->i_sb, + inode = btrfs_iget_locked(root->fs_info->sb, found_key.offset, root); if (!inode) break; @@ -965,7 +963,9 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) * do a destroy_inode */ if (is_bad_inode(inode)) { + trans = btrfs_start_transaction(root, 1); btrfs_orphan_del(trans, inode); + btrfs_end_transaction(trans, root); iput(inode); continue; } @@ -988,7 +988,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); btrfs_free_path(path); - btrfs_end_transaction(trans, root); } void btrfs_read_locked_inode(struct inode *inode) @@ -1343,8 +1342,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u64 mask = root->sectorsize - 1; if (root->ref_cows) - btrfs_drop_extent_cache(inode, - new_size & (~mask), (u64)-1); + btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); path = btrfs_alloc_path(); path->reada = -1; BUG_ON(!path); @@ -1677,7 +1675,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) hole_start, 0, 0, hole_size, 0); btrfs_drop_extent_cache(inode, hole_start, - (u64)-1); + (u64)-1, 0); btrfs_check_file(root, inode); } mutex_unlock(&BTRFS_I(inode)->extent_mutex); @@ -1843,6 +1841,24 @@ static int btrfs_find_actor(struct inode *inode, void *opaque) args->root == BTRFS_I(inode)->root); } +struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, + struct btrfs_root *root, int wait) +{ + struct inode *inode; + struct btrfs_iget_args args; + args.ino = objectid; + args.root = root; + + if (wait) { + inode = ilookup5(s, objectid, btrfs_find_actor, + (void *)&args); + } else { + inode = ilookup5_nowait(s, objectid, btrfs_find_actor, + (void *)&args); + } + return inode; +} + struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid, struct btrfs_root *root) { @@ -3266,7 +3282,7 @@ void btrfs_destroy_inode(struct inode *inode) btrfs_put_ordered_extent(ordered); } } - 
btrfs_drop_extent_cache(inode, 0, (u64)-1); + btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } @@ -3412,16 +3428,22 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) { struct list_head *head = &root->fs_info->delalloc_inodes; struct btrfs_inode *binode; + struct inode *inode; unsigned long flags; spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); while(!list_empty(head)) { binode = list_entry(head->next, struct btrfs_inode, delalloc_inodes); - atomic_inc(&binode->vfs_inode.i_count); + inode = igrab(&binode->vfs_inode); + if (!inode) + list_del_init(&binode->delalloc_inodes); spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); - filemap_write_and_wait(binode->vfs_inode.i_mapping); - iput(&binode->vfs_inode); + if (inode) { + filemap_write_and_wait(inode->i_mapping); + iput(inode); + } + cond_resched(); spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); } spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4c6e0c15754..04de767a8db 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -444,12 +444,10 @@ static noinline int btrfs_ioctl_snap_create(struct btrfs_root *root, goto out; } - mutex_lock(&root->fs_info->drop_mutex); if (root == root->fs_info->tree_root) ret = create_subvol(root, vol_args->name, namelen); else ret = create_snapshot(root, vol_args->name, namelen); - mutex_unlock(&root->fs_info->drop_mutex); out: kfree(vol_args); return ret; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index da6d43eb41d..951eacff242 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -309,7 +309,6 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) { struct list_head splice; struct list_head *cur; - struct list_head *tmp; struct btrfs_ordered_extent *ordered; struct inode *inode; @@ -317,37 +316,38 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) spin_lock(&root->fs_info->ordered_extent_lock); list_splice_init(&root->fs_info->ordered_extents, &splice); - list_for_each_safe(cur, tmp, &splice) { + while (!list_empty(&splice)) { cur = splice.next; ordered = list_entry(cur, struct btrfs_ordered_extent, root_extent_list); if (nocow_only && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { + list_move(&ordered->root_extent_list, + &root->fs_info->ordered_extents); cond_resched_lock(&root->fs_info->ordered_extent_lock); continue; } list_del_init(&ordered->root_extent_list); atomic_inc(&ordered->refs); - inode = ordered->inode; /* - * the inode can't go away until all the pages are gone - * and the pages won't go away while there is still - * an ordered extent and the ordered extent won't go - * away until it is off this list. So, we can safely - * increment i_count here and call iput later + * the inode may be getting freed (in sys_unlink path). 
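That race is exactly why the next hunk replaces the bare atomic_inc(&inode->i_count) with igrab(), which takes a reference only while the inode is still live. A minimal sketch of the pattern, assuming only the stock VFS behaviour of igrab() and iput():

	static int use_inode_safely(struct inode *inode)
	{
		struct inode *held;

		/* igrab() returns NULL once eviction (e.g. via unlink) has begun */
		held = igrab(inode);
		if (!held)
			return -ESTALE;	/* lost the race; caller must cope */
		/* the inode is pinned in the icache until the matching iput() */
		iput(held);
		return 0;
	}

The NULL return is what forces the caller to handle the "unlink won" case explicitly, which is the shape the diff below takes.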
*/ - atomic_inc(&inode->i_count); + inode = igrab(ordered->inode); + spin_unlock(&root->fs_info->ordered_extent_lock); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - iput(inode); + if (inode) { + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + iput(inode); + } else { + btrfs_put_ordered_extent(ordered); + } spin_lock(&root->fs_info->ordered_extent_lock); } - list_splice_init(&splice, &root->fs_info->ordered_extents); spin_unlock(&root->fs_info->ordered_extent_lock); return 0; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 656baefa525..8c83cf464c8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -109,6 +109,7 @@ noinline int btrfs_record_root_in_trans(struct btrfs_root *root) spin_lock_init(&dirty->root->node_lock); spin_lock_init(&dirty->root->list_lock); mutex_init(&dirty->root->objectid_mutex); + mutex_init(&dirty->root->log_mutex); INIT_LIST_HEAD(&dirty->root->dead_list); dirty->root->node = root->commit_root; dirty->root->commit_root = NULL; @@ -590,13 +591,14 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, root = dirty->latest_root; atomic_inc(&root->fs_info->throttles); - mutex_lock(&root->fs_info->drop_mutex); while(1) { trans = btrfs_start_transaction(tree_root, 1); + mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, dirty->root); if (ret != -EAGAIN) { break; } + mutex_unlock(&root->fs_info->drop_mutex); err = btrfs_update_root(trans, tree_root, @@ -608,10 +610,8 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, ret = btrfs_end_transaction(trans, tree_root); BUG_ON(ret); - mutex_unlock(&root->fs_info->drop_mutex); btrfs_btree_balance_dirty(tree_root, nr); cond_resched(); - mutex_lock(&root->fs_info->drop_mutex); } BUG_ON(ret); atomic_dec(&root->fs_info->throttles); @@ -689,7 +689,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); key.objectid = objectid; - key.offset = 1; + key.offset = trans->transid; btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); old = btrfs_lock_root_node(root); -- cgit v1.2.3-70-g09d2 From d352ac68148b69937d39ca5d48bcc4478e118dbf Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 29 Sep 2008 15:18:18 -0400 Subject: Btrfs: add and improve comments This improves the comments at the top of many functions. It didn't dive into the guts of functions because I was trying to avoid merging problems with the new allocator and back reference work. extent-tree.c and volumes.c were both skipped, and there is definitely more work todo in cleaning and commenting the code. 
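The conversion from atomic_inc(&inode->i_count) to igrab() in the hunk above is the heart of the fix: bumping i_count unconditionally can resurrect an inode that is already being torn down in the unlink path, while igrab() refuses once teardown has begun, forcing the caller to handle the NULL case. A minimal userspace model of that try-get rule (types and names invented here, not kernel API):

#include <stdatomic.h>
#include <stddef.h>

struct obj {
	atomic_int refs;	/* 0 means the object is being torn down */
};

/* model of igrab(): take a reference only while the object is still
 * live. Returns NULL once teardown has begun, so the caller must be
 * prepared for the object to vanish, exactly as the patched walk is. */
static struct obj *obj_tryget(struct obj *o)
{
	int r = atomic_load(&o->refs);

	while (r > 0) {
		/* CAS loop: bump refs only if the count is still > 0 */
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return o;
	}
	return NULL;
}

An unconditional increment has no such guard, which is why the old list walk could race with inode teardown.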
Signed-off-by: Chris Mason --- fs/btrfs/Makefile | 2 +- fs/btrfs/TODO | 20 ----- fs/btrfs/async-thread.c | 10 ++- fs/btrfs/async-thread.h | 7 +- fs/btrfs/bit-radix.c | 130 --------------------------------- fs/btrfs/bit-radix.h | 33 --------- fs/btrfs/btrfs_inode.h | 54 +++++++++++++- fs/btrfs/crc32c.h | 18 +++++ fs/btrfs/ctree.c | 127 +++++++++++++++++++++++++++++--- fs/btrfs/ctree.h | 1 - fs/btrfs/dir-item.c | 41 +++++++++++ fs/btrfs/disk-io.c | 33 ++++++++- fs/btrfs/extent_io.c | 34 ++++++++- fs/btrfs/extent_map.c | 10 +++ fs/btrfs/file.c | 44 ++++++++++- fs/btrfs/inode.c | 189 +++++++++++++++++++++++++++++++----------------- fs/btrfs/locking.c | 13 ++++ fs/btrfs/ordered-data.c | 19 ++++- fs/btrfs/ref-cache.c | 26 +++++++ fs/btrfs/ref-cache.h | 3 + fs/btrfs/root-tree.c | 21 +++++- fs/btrfs/struct-funcs.c | 21 ++++++ fs/btrfs/super.c | 3 + fs/btrfs/transaction.c | 67 ++++++++++++++++- fs/btrfs/tree-defrag.c | 4 + 25 files changed, 653 insertions(+), 277 deletions(-) delete mode 100644 fs/btrfs/TODO delete mode 100644 fs/btrfs/bit-radix.c delete mode 100644 fs/btrfs/bit-radix.h (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index d5c28557fba..48b7909ca8d 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -4,7 +4,7 @@ ifneq ($(KERNELRELEASE),) obj-m := btrfs.o btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ file-item.o inode-item.o inode-map.o disk-io.o \ - transaction.o bit-radix.o inode.o file.o tree-defrag.o \ + transaction.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ ref-cache.o export.o tree-log.o acl.o free-space-cache.o diff --git a/fs/btrfs/TODO b/fs/btrfs/TODO deleted file mode 100644 index d9b6d38c603..00000000000 --- a/fs/btrfs/TODO +++ /dev/null @@ -1,20 +0,0 @@ -* cleanup, add more error checking, get rid of BUG_ONs -* Fix ENOSPC handling -* Make allocator smarter -* add a block group to struct inode -* Do actual block accounting -* Check compat and incompat flags on the inode -* Get rid of struct ctree_path, limiting tree levels held at one time -* Add generation number to key pointer in nodes -* Add generation number to inode -* forbid cross subvolume renames and hardlinks -* Release -* Do real tree locking -* Add extent mirroring (backup copies of blocks) -* Add fancy interface to get access to incremental backups -* Add fancy striped extents to make big reads faster -* Use relocation to try and fix write errors -* Make allocator much smarter -* xattrs (directory streams for regular files) -* Scrub & defrag - diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 4e780b279de..04fb9702d14 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -231,17 +231,25 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers) /* * if we pick a busy task, move the task to the end of the list. - * hopefully this will keep things somewhat evenly balanced + * hopefully this will keep things somewhat evenly balanced. + * Do the move in batches based on the sequence number. This groups + * requests submitted at roughly the same time onto the same worker. 
*/ next = workers->worker_list.next; worker = list_entry(next, struct btrfs_worker_thread, worker_list); atomic_inc(&worker->num_pending); worker->sequence++; + if (worker->sequence % workers->idle_thresh == 0) list_move_tail(next, &workers->worker_list); return worker; } +/* + * selects a worker thread to take the next job. This will either find + * an idle worker, start a new worker up to the max count, or just return + * one of the existing busy workers. + */ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) { struct btrfs_worker_thread *worker; diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index 43e44d115dd..4ec9a2ee0f9 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -63,14 +63,17 @@ struct btrfs_workers { /* once a worker has this many requests or fewer, it is idle */ int idle_thresh; - /* list with all the work threads */ + /* list with all the work threads. The workers on the idle thread + * may be actively servicing jobs, but they haven't yet hit the + * idle thresh limit above. + */ struct list_head worker_list; struct list_head idle_list; /* lock for finding the next worker thread to queue on */ spinlock_t lock; - /* extra name for this worker */ + /* extra name for this worker, used for current->name */ char *name; }; diff --git a/fs/btrfs/bit-radix.c b/fs/btrfs/bit-radix.c deleted file mode 100644 index e8bf876db39..00000000000 --- a/fs/btrfs/bit-radix.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2007 Oracle. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA. 
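The next_worker() change above rotates the head worker to the tail only once every idle_thresh submissions, so jobs queued at roughly the same time batch onto one thread instead of round-robining one job at a time. A standalone model of that policy (constants and names invented for illustration):

#include <stdio.h>

#define NWORKERS	4
#define IDLE_THRESH	8	/* rotate after this many jobs on one worker */

static unsigned int sequence[NWORKERS];

/* pick the worker at the head of a conceptual list; advance the head
 * (i.e. move the worker to the tail) only once it has taken a batch */
static int pick_worker(void)
{
	static int head;
	int w = head;

	if (++sequence[w] % IDLE_THRESH == 0)
		head = (head + 1) % NWORKERS;
	return w;
}

int main(void)
{
	for (int i = 0; i < 32; i++)
		printf("job %2d -> worker %d\n", i, pick_worker());
	return 0;
}

Running it shows jobs 0-7 landing on worker 0, 8-15 on worker 1, and so on: the batching the comment describes.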
- */ - -#include "bit-radix.h" - -#define BIT_ARRAY_BYTES 256 -#define BIT_RADIX_BITS_PER_ARRAY ((BIT_ARRAY_BYTES - sizeof(unsigned long)) * 8) - -extern struct kmem_cache *btrfs_bit_radix_cachep; -int set_radix_bit(struct radix_tree_root *radix, unsigned long bit) -{ - unsigned long *bits; - unsigned long slot; - int bit_slot; - int ret; - - slot = bit / BIT_RADIX_BITS_PER_ARRAY; - bit_slot = bit % BIT_RADIX_BITS_PER_ARRAY; - - bits = radix_tree_lookup(radix, slot); - if (!bits) { - bits = kmem_cache_alloc(btrfs_bit_radix_cachep, GFP_NOFS); - if (!bits) - return -ENOMEM; - memset(bits + 1, 0, BIT_ARRAY_BYTES - sizeof(unsigned long)); - bits[0] = slot; - ret = radix_tree_insert(radix, slot, bits); - if (ret) - return ret; - } - ret = test_and_set_bit(bit_slot, bits + 1); - if (ret < 0) - ret = 1; - return ret; -} - -int test_radix_bit(struct radix_tree_root *radix, unsigned long bit) -{ - unsigned long *bits; - unsigned long slot; - int bit_slot; - - slot = bit / BIT_RADIX_BITS_PER_ARRAY; - bit_slot = bit % BIT_RADIX_BITS_PER_ARRAY; - - bits = radix_tree_lookup(radix, slot); - if (!bits) - return 0; - return test_bit(bit_slot, bits + 1); -} - -int clear_radix_bit(struct radix_tree_root *radix, unsigned long bit) -{ - unsigned long *bits; - unsigned long slot; - int bit_slot; - int i; - int empty = 1; - - slot = bit / BIT_RADIX_BITS_PER_ARRAY; - bit_slot = bit % BIT_RADIX_BITS_PER_ARRAY; - - bits = radix_tree_lookup(radix, slot); - if (!bits) - return 0; - clear_bit(bit_slot, bits + 1); - for (i = 1; i < BIT_ARRAY_BYTES / sizeof(unsigned long); i++) { - if (bits[i]) { - empty = 0; - break; - } - } - if (empty) { - bits = radix_tree_delete(radix, slot); - BUG_ON(!bits); - kmem_cache_free(btrfs_bit_radix_cachep, bits); - } - return 0; -} - -int find_first_radix_bit(struct radix_tree_root *radix, unsigned long *retbits, - unsigned long start, int nr) -{ - unsigned long *bits; - unsigned long *gang[4]; - int found; - int ret; - int i; - int total_found = 0; - unsigned long slot; - - slot = start / BIT_RADIX_BITS_PER_ARRAY; - ret = radix_tree_gang_lookup(radix, (void **)gang, slot, - ARRAY_SIZE(gang)); - found = start % BIT_RADIX_BITS_PER_ARRAY; - for (i = 0; i < ret && nr > 0; i++) { - bits = gang[i]; - while(nr > 0) { - found = find_next_bit(bits + 1, - BIT_RADIX_BITS_PER_ARRAY, - found); - if (found < BIT_RADIX_BITS_PER_ARRAY) { - *retbits = bits[0] * - BIT_RADIX_BITS_PER_ARRAY + found; - retbits++; - nr--; - total_found++; - found++; - } else - break; - } - found = 0; - } - return total_found; -} diff --git a/fs/btrfs/bit-radix.h b/fs/btrfs/bit-radix.h deleted file mode 100644 index c100f54d5c3..00000000000 --- a/fs/btrfs/bit-radix.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (C) 2007 Oracle. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA. 
- */ - -#ifndef __BIT_RADIX__ -#define __BIT_RADIX__ -#include - -int set_radix_bit(struct radix_tree_root *radix, unsigned long bit); -int test_radix_bit(struct radix_tree_root *radix, unsigned long bit); -int clear_radix_bit(struct radix_tree_root *radix, unsigned long bit); -int find_first_radix_bit(struct radix_tree_root *radix, unsigned long *retbits, - unsigned long start, int nr); - -static inline void init_bit_radix(struct radix_tree_root *radix) -{ - INIT_RADIX_TREE(radix, GFP_NOFS); -} -#endif diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 0577fda2168..0b2e623cf42 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -25,27 +25,58 @@ /* in memory btrfs inode */ struct btrfs_inode { + /* which subvolume this inode belongs to */ struct btrfs_root *root; + + /* the block group preferred for allocations. This pointer is buggy + * and needs to be replaced with a bytenr instead + */ struct btrfs_block_group_cache *block_group; + + /* key used to find this inode on disk. This is used by the code + * to read in roots of subvolumes + */ struct btrfs_key location; + + /* the extent_tree has caches of all the extent mappings to disk */ struct extent_map_tree extent_tree; + + /* the io_tree does range state (DIRTY, LOCKED etc) */ struct extent_io_tree io_tree; + + /* special utility tree used to record which mirrors have already been + * tried when checksums fail for a given block + */ struct extent_io_tree io_failure_tree; + + /* held while inserting checksums to avoid races */ struct mutex csum_mutex; + + /* held while inesrting or deleting extents from files */ struct mutex extent_mutex; + + /* held while logging the inode in tree-log.c */ struct mutex log_mutex; - struct inode vfs_inode; + + /* used to order data wrt metadata */ struct btrfs_ordered_inode_tree ordered_tree; + /* standard acl pointers */ struct posix_acl *i_acl; struct posix_acl *i_default_acl; /* for keeping track of orphaned inodes */ struct list_head i_orphan; + /* list of all the delalloc inodes in the FS. There are times we need + * to write all the delalloc pages to disk, and this list is used + * to walk them all. + */ struct list_head delalloc_inodes; - /* full 64 bit generation number */ + /* full 64 bit generation number, struct vfs_inode doesn't have a big + * enough field for this. + */ u64 generation; /* @@ -57,10 +88,25 @@ struct btrfs_inode { */ u64 logged_trans; - /* trans that last made a change that should be fully fsync'd */ + /* + * trans that last made a change that should be fully fsync'd. This + * gets reset to zero each time the inode is logged + */ u64 log_dirty_trans; + + /* total number of bytes pending delalloc, used by stat to calc the + * real block usage of the file + */ u64 delalloc_bytes; + + /* + * the size of the file stored in the metadata on disk. data=ordered + * means the in-memory i_size might be larger than the size on disk + * because not all the blocks are written yet. + */ u64 disk_i_size; + + /* flags field from the on disk inode */ u32 flags; /* @@ -68,6 +114,8 @@ struct btrfs_inode { * number for new files that are created */ u64 index_cnt; + + struct inode vfs_inode; }; static inline struct btrfs_inode *BTRFS_I(struct inode *inode) diff --git a/fs/btrfs/crc32c.h b/fs/btrfs/crc32c.h index 4f0fefed132..1eaf11d334f 100644 --- a/fs/btrfs/crc32c.h +++ b/fs/btrfs/crc32c.h @@ -1,3 +1,21 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. 
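The btrfs_inode hunk above moves the embedded vfs_inode to the end of the struct; BTRFS_I() still recovers the containing btrfs_inode because the conversion is pure pointer arithmetic. A freestanding model of that container-of pattern (struct names invented as stand-ins):

#include <stddef.h>
#include <stdio.h>

struct vfs_inode { long ino; };		/* stand-in for struct inode */

struct fs_inode {			/* stand-in for struct btrfs_inode */
	unsigned long long generation;
	struct vfs_inode vfs;		/* embedded, as vfs_inode is above */
};

/* recover the containing fs_inode from a pointer to the embedded
 * vfs_inode -- the same trick BTRFS_I() uses via container_of() */
static struct fs_inode *FS_I(struct vfs_inode *v)
{
	return (struct fs_inode *)((char *)v - offsetof(struct fs_inode, vfs));
}

int main(void)
{
	struct fs_inode fi = { .generation = 7, .vfs = { .ino = 42 } };

	printf("gen=%llu\n", FS_I(&fi.vfs)->generation);
	return 0;
}

Because offsetof does the work, moving vfs_inode to the end of the struct (as the hunk does) changes nothing for callers of the accessor.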
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + #ifndef __BTRFS_CRC32C__ #define __BTRFS_CRC32C__ #include diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 50e81f43e6d..ff3261ff2e1 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Oracle. All rights reserved. + * Copyright (C) 2007,2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public @@ -54,12 +54,19 @@ struct btrfs_path *btrfs_alloc_path(void) return path; } +/* this also releases the path */ void btrfs_free_path(struct btrfs_path *p) { btrfs_release_path(NULL, p); kmem_cache_free(btrfs_path_cachep, p); } +/* + * path release drops references on the extent buffers in the path + * and it drops any locks held by this path + * + * It is safe to call this on paths that no locks or extent buffers held. + */ void noinline btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) { int i; @@ -77,6 +84,16 @@ void noinline btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) } } +/* + * safely gets a reference on the root node of a tree. A lock + * is not taken, so a concurrent writer may put a different node + * at the root of the tree. See btrfs_lock_root_node for the + * looping required. + * + * The extent buffer returned by this has a reference taken, so + * it won't disappear. It may stop being the root of the tree + * at any time because there are no locks held. + */ struct extent_buffer *btrfs_root_node(struct btrfs_root *root) { struct extent_buffer *eb; @@ -87,6 +104,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root) return eb; } +/* loop around taking references on and locking the root node of the + * tree until you end up with a lock on the root. A locked buffer + * is returned, with a reference held. + */ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; @@ -108,6 +129,10 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) return eb; } +/* cowonly root (everything not a reference counted cow subvolume), just get + * put onto a simple dirty list. transaction.c walks this to make sure they + * get properly updated on disk. + */ static void add_root_to_dirty_list(struct btrfs_root *root) { if (root->track_dirty && list_empty(&root->dirty_list)) { @@ -116,6 +141,11 @@ static void add_root_to_dirty_list(struct btrfs_root *root) } } +/* + * used by snapshot creation to make a copy of a root for a tree with + * a given objectid. The buffer with the new root node is returned in + * cow_ret, and this func returns zero on success or a negative error code. 
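The btrfs_lock_root_node comment above describes a lock-then-revalidate loop: between taking a reference on the root and locking it, a concurrent writer may have installed a different root. A compilable sketch of that shape, with a pthread mutex standing in for the extent-buffer lock; in the kernel the snapshot step also takes a reference so the buffer cannot be freed mid-loop:

#include <pthread.h>

struct node {
	pthread_mutex_t lock;
	/* ... tree payload ... */
};

struct tree {
	struct node *root;	/* may be swapped by concurrent writers */
};

/* loop until the node we locked is still the root while we hold its
 * lock; otherwise drop it and retry, as btrfs_lock_root_node does */
static struct node *lock_root(struct tree *t)
{
	struct node *n;

	for (;;) {
		n = t->root;		/* snapshot (plus a ref, in real code) */
		pthread_mutex_lock(&n->lock);
		if (n == t->root)
			return n;	/* still the root: done */
		pthread_mutex_unlock(&n->lock);
	}
}

int main(void)
{
	struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct tree t = { .root = &n };

	pthread_mutex_unlock(&lock_root(&t)->lock);
	return 0;
}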
+ */ int btrfs_copy_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, @@ -167,6 +197,22 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, return 0; } +/* + * does the dirty work in cow of a single block. The parent block + * (if supplied) is updated to point to the new cow copy. The new + * buffer is marked dirty and returned locked. If you modify the block + * it needs to be marked dirty again. + * + * search_start -- an allocation hint for the new block + * + * empty_size -- a hint that you plan on doing more cow. This is the size in bytes + * the allocator should try to find free next to the block it returns. This is + * just a hint and may be ignored by the allocator. + * + * prealloc_dest -- if you have already reserved a destination for the cow, + * this uses that block instead of allocating a new one. btrfs_alloc_reserved_extent + * is used to finish the allocation. + */ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, @@ -311,6 +357,11 @@ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, return 0; } +/* + * cows a single block, see __btrfs_cow_block for the real work. + * This version of it has extra checks so that a block isn't cow'd more than + * once per transaction, as long as it hasn't been written yet + */ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, @@ -347,6 +398,10 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, return ret; } +/* + * helper function for defrag to decide if two blocks pointed to by a + * node are actually close by + */ static int close_blocks(u64 blocknr, u64 other, u32 blocksize) { if (blocknr < other && other - (blocknr + blocksize) < 32768) @@ -381,6 +436,11 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) } +/* + * this is used by the defrag code to go through all the + * leaves pointed to by a node and reallocate them so that + * disk order is close to key order + */ int btrfs_realloc_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *parent, int start_slot, int cache_only, u64 *last_ret, @@ -521,6 +581,10 @@ static inline unsigned int leaf_data_end(struct btrfs_root *root, return btrfs_item_offset_nr(leaf, nr - 1); } +/* + * extra debugging checks to make sure all the items in a key are + * well formed and in the proper order + */ static int check_node(struct btrfs_root *root, struct btrfs_path *path, int level) { @@ -561,6 +625,10 @@ static int check_node(struct btrfs_root *root, struct btrfs_path *path, return 0; } +/* + * extra checking to make sure all the items in a leaf are + * well formed and in the proper order + */ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, int level) { @@ -782,6 +850,10 @@ static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, return -1; } +/* given a node and slot number, this reads the blocks it points to. The + * extent buffer is returned with a reference taken (but unlocked). + * NULL is returned on error. 
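The hunk context above lands just after bin_search(), which produces the "slot" these new comments keep referring to. Stripped of the extent-buffer machinery, the contract is an ordinary binary search that, on a miss, reports where the key would be inserted. A standalone model over bare u64 keys (the real version compares full btrfs_disk_keys via comp_keys):

#include <stdint.h>
#include <stdio.h>

/* returns 0 with the matching slot, or 1 with the slot where the key
 * would be inserted -- the two outcomes a btree search distinguishes */
static int slot_search(const uint64_t *keys, int nr, uint64_t key, int *slot)
{
	int lo = 0, hi = nr;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (keys[mid] == key) {
			*slot = mid;
			return 0;
		}
		if (keys[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	*slot = lo;
	return 1;
}

int main(void)
{
	uint64_t keys[] = { 10, 20, 30 };
	int slot;
	int ret = slot_search(keys, 3, 25, &slot);

	printf("ret=%d slot=%d\n", ret, slot);	/* ret=1 slot=2 */
	return 0;
}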
+ */ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root, struct extent_buffer *parent, int slot) { @@ -798,6 +870,11 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root, btrfs_node_ptr_generation(parent, slot)); } +/* + * node level balancing, used to make sure nodes are in proper order for + * item deletion. We balance from the top down, so we have to make sure + * that a deletion won't leave an node completely empty later on. + */ static noinline int balance_level(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) @@ -1024,7 +1101,10 @@ enospc: return ret; } -/* returns zero if the push worked, non-zero otherwise */ +/* Node balancing for insertion. Here we only split or push nodes around + * when they are completely full. This is also done top down, so we + * have to be pessimistic. + */ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) @@ -1150,7 +1230,8 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, } /* - * readahead one full node of leaves + * readahead one full node of leaves, finding things that are close + * to the block in 'slot', and triggering ra on them. */ static noinline void reada_for_search(struct btrfs_root *root, struct btrfs_path *path, @@ -1226,6 +1307,19 @@ static noinline void reada_for_search(struct btrfs_root *root, } } +/* + * when we walk down the tree, it is usually safe to unlock the higher layers in + * the tree. The exceptions are when our path goes through slot 0, because operations + * on the tree might require changing key pointers higher up in the tree. + * + * callers might also have set path->keep_locks, which tells this code to + * keep the lock if the path points to the last slot in the block. This is + * part of walking through the tree, and selecting the next slot in the higher + * block. + * + * lowest_unlock sets the lowest level in the tree we're allowed to unlock. + * so if lowest_unlock is 1, level 0 won't be unlocked + */ static noinline void unlock_up(struct btrfs_path *path, int level, int lowest_unlock) { @@ -2705,6 +2799,12 @@ again: return ret; } +/* + * make the item pointed to by the path smaller. new_size indicates + * how small to make it, and from_end tells us if we just chop bytes + * off the end of the item or if we shift the item to chop bytes off + * the front. + */ int btrfs_truncate_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -2818,6 +2918,9 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, return ret; } +/* + * make the item pointed to by the path bigger, data_size is the new size. + */ int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u32 data_size) @@ -2897,7 +3000,7 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, } /* - * Given a key and some data, insert an item into the tree. + * Given a key and some data, insert items into the tree. * This does all the path init required, making room in the tree if needed. */ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, @@ -3046,9 +3149,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root /* * delete the pointer from a given node. * - * If the delete empties a node, the node is removed from the tree, - * continuing all the way the root if required. 
The root is converted into - * a leaf if all the nodes are emptied. + * the tree should have been previously balanced so the deletion does not + * empty a node. */ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot) @@ -3233,6 +3335,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, * search the tree again to find a leaf with lesser keys * returns 0 if it found something or 1 if there are no lesser leaves. * returns < 0 on io errors. + * + * This may release the path, and so you may lose any locks held at the + * time you call it. */ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) { @@ -3265,9 +3370,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) /* * A helper function to walk down the tree starting at min_key, and looking * for nodes or leaves that are either in cache or have a minimum - * transaction id. This is used by the btree defrag code, but could - * also be used to search for blocks that have changed since a given - * transaction id. + * transaction id. This is used by the btree defrag code, and tree logging * * This does not cow, but it does stuff the starting key it finds back * into min_key, so you can call btrfs_search_slot with cow=1 on the @@ -3279,6 +3382,10 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) * This honors path->lowest_level to prevent descent past a given level * of the tree. * + * min_trans indicates the oldest transaction that you are interested + * in walking through. Any nodes or leaves older than min_trans are + * skipped over (without reading them). + * * returns zero if something useful was found, < 0 on error and 1 if there * was nothing in the tree that matched the search criteria. */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0079b60b18f..ded1643c027 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -27,7 +27,6 @@ #include #include #include -#include "bit-radix.h" #include "extent_io.h" #include "extent_map.h" #include "async-thread.h" diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index e4f30090d64..5040b71f190 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -21,6 +21,14 @@ #include "hash.h" #include "transaction.h" +/* + * insert a name into a directory, doing overflow properly if there is a hash + * collision. data_size indicates how big the item inserted should be. On + * success a struct btrfs_dir_item pointer is returned, otherwise it is + * an ERR_PTR. + * + * The name is not copied into the dir item, you have to do that yourself. + */ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -55,6 +63,10 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle return (struct btrfs_dir_item *)ptr; } +/* + * xattrs work a lot like directories, this inserts an xattr item + * into the tree + */ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, u16 name_len, const void *data, u16 data_len, @@ -109,6 +121,13 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, return ret; } +/* + * insert a directory item in the tree, doing all the magic for + * both indexes. 
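insert_with_overflow, commented above, keys directory entries by a hash of the name, so two different names can collide on the same key; the colliding entry is appended rather than failing the insert. A toy chained-hash model of that shape (btrfs actually extends the existing dir item in place and hashes with crc32c; every name here is invented for illustration):

#include <stdlib.h>
#include <string.h>

struct dir_entry {
	struct dir_entry *next;	/* same-hash overflow chain */
	char name[32];
};

#define NBUCKETS 64

static struct dir_entry *buckets[NBUCKETS];

/* toy name hash; the real item offset is crc32c of the name */
static unsigned int name_hash(const char *name)
{
	unsigned int h = 0;

	while (*name)
		h = h * 31 + (unsigned char)*name++;
	return h % NBUCKETS;
}

/* colliding names share one bucket (one dir item in btrfs); the new
 * entry is appended instead of the insert failing with EEXIST */
static int insert_with_overflow(const char *name)
{
	unsigned int h = name_hash(name);
	struct dir_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	strncpy(e->name, name, sizeof(e->name) - 1);
	e->next = buckets[h];
	buckets[h] = e;
	return 0;
}

int main(void)
{
	insert_with_overflow("foo");
	insert_with_overflow("bar");	/* may share foo's bucket */
	return 0;
}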
'dir' indicates which objectid to insert it into, + * 'location' is the key to stuff into the directory item, 'type' is the + * type of the inode we're pointing to, and 'index' is the sequence number + * to use for the second index (if one is created). + */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, u64 dir, struct btrfs_key *location, u8 type, u64 index) @@ -184,6 +203,11 @@ out: return 0; } +/* + * lookup a directory item based on name. 'dir' is the objectid + * we're searching in, and 'mod' tells us if you plan on deleting the + * item (use mod < 0) or changing the options (use mod > 0) + */ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, @@ -222,6 +246,14 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, return btrfs_match_dir_item_name(root, path, name, name_len); } +/* + * lookup a directory item based on index. 'dir' is the objectid + * we're searching in, and 'mod' tells us if you plan on deleting the + * item (use mod < 0) or changing the options (use mod > 0) + * + * The name is used to make sure the index really points to the name you were + * looking for. + */ struct btrfs_dir_item * btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -282,6 +314,11 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, return btrfs_match_dir_item_name(root, path, name, name_len); } +/* + * helper function to look at the directory item pointed to by 'path' + * this walks through all the entries in a dir item and finds one + * for a specific name. + */ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, struct btrfs_path *path, const char *name, int name_len) @@ -313,6 +350,10 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, return NULL; } +/* + * given a pointer into a directory item, delete it. This + * handles items that have more than one entry in them. + */ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 45b4f728527..5ee10d3136f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -55,6 +55,11 @@ static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf) static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); +/* + * end_io_wq structs are used to do processing in task context when an IO is + * complete. This is used during reads to verify checksums, and it is used + * by writes to insert metadata for new file extents after IO is complete. + */ struct end_io_wq { struct bio *bio; bio_end_io_t *end_io; @@ -66,6 +71,11 @@ struct end_io_wq { struct btrfs_work work; }; +/* + * async submit bios are used to offload expensive checksumming + * onto the worker threads. They checksum file and metadata bios + * just before they are sent down the IO stack. 
+ */ struct async_submit_bio { struct inode *inode; struct bio *bio; @@ -76,6 +86,10 @@ struct async_submit_bio { struct btrfs_work work; }; +/* + * extents on the btree inode are pretty simple, there's one extent + * that covers the entire device + */ struct extent_map *btree_get_extent(struct inode *inode, struct page *page, size_t page_offset, u64 start, u64 len, int create) @@ -151,6 +165,10 @@ void btrfs_csum_final(u32 crc, char *result) *(__le32 *)result = ~cpu_to_le32(crc); } +/* + * compute the csum for a btree block, and either verify it or write it + * into the csum field of the block. + */ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, int verify) { @@ -204,6 +222,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, return 0; } +/* + * we can't consider a given block up to date unless the transid of the + * block matches the transid in the parent node's pointer. This is how we + * detect blocks that either didn't get written at all or got written + * in the wrong place. + */ static int verify_parent_transid(struct extent_io_tree *io_tree, struct extent_buffer *eb, u64 parent_transid) { @@ -228,9 +252,12 @@ out: unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); return ret; - } +/* + * helper to read a given tree block, doing retries as required when + * the checksums don't match and we have alternate mirrors to try. + */ static int btree_read_extent_buffer_pages(struct btrfs_root *root, struct extent_buffer *eb, u64 start, u64 parent_transid) @@ -260,6 +287,10 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror return -EIO; } +/* + * checksum a dirty tree block before IO. This has extra checks to make + * sure we only fill in the checksum field in the first page of a multi-page block + */ int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { struct extent_io_tree *tree; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8bd1b402f3f..563b2d12f4f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -914,6 +914,10 @@ int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) } EXPORT_SYMBOL(wait_on_extent_writeback); +/* + * either insert or lock state struct between start and end use mask to tell + * us if waiting is desired. + */ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { int err; @@ -982,6 +986,13 @@ int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) } EXPORT_SYMBOL(set_range_writeback); +/* + * find the first offset in the io tree with 'bits' set. zero is + * returned if we find something, and *start_ret and *end_ret are + * set to reflect the state struct that was found. + * + * If nothing was found, 1 is returned, < 0 on error + */ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, int bits) { @@ -1017,6 +1028,10 @@ out: } EXPORT_SYMBOL(find_first_extent_bit); +/* find the first state struct with 'bits' set after 'start', and + * return it. tree->lock must be held. NULL will returned if + * nothing was found after 'start' + */ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, int bits) { @@ -1046,8 +1061,14 @@ out: } EXPORT_SYMBOL(find_first_extent_bit_state); -u64 find_lock_delalloc_range(struct extent_io_tree *tree, - u64 *start, u64 *end, u64 max_bytes) +/* + * find a contiguous range of bytes in the file marked as delalloc, not + * more than 'max_bytes'. 
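btree_read_extent_buffer_pages, commented above, retries a read against each alternate mirror until one copy passes both the checksum and the parent-transid check. Reduced to its control flow, with read_mirror and block_ok as hypothetical callbacks standing in for the btrfs internals:

/* try each copy of a block in turn; succeed on the first one whose
 * checksum and transid both verify, as the helper above does */
static int read_block_retrying(void *buf, int num_copies,
			       int (*read_mirror)(void *buf, int mirror),
			       int (*block_ok)(const void *buf))
{
	for (int mirror = 1; mirror <= num_copies; mirror++) {
		if (read_mirror(buf, mirror) == 0 && block_ok(buf))
			return 0;	/* good copy found */
	}
	return -1;			/* all mirrors bad: real code returns -EIO */
}

Mirror numbers start at 1 because mirror 0 conventionally means "no preference" in the btrfs mapping code.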
start and end are used to return the range, + * + * 1 is returned if we find something, 0 if nothing was in the tree + */ +static noinline u64 find_lock_delalloc_range(struct extent_io_tree *tree, + u64 *start, u64 *end, u64 max_bytes) { struct rb_node *node; struct extent_state *state; @@ -1130,6 +1151,11 @@ out: return found; } +/* + * count the number of bytes in the tree that have a given bit(s) + * set. This can be fairly slow, except for EXTENT_DIRTY which is + * cached. The total number found is returned. + */ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, unsigned long bits) @@ -1245,6 +1271,10 @@ int unlock_range(struct extent_io_tree *tree, u64 start, u64 end) } EXPORT_SYMBOL(unlock_range); +/* + * set the private field for a given byte offset in the tree. If there isn't + * an extent_state there already, this does nothing. + */ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) { struct rb_node *node; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 78ced11d18c..74b2a29880d 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -114,6 +114,10 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset, return NULL; } +/* + * search through the tree for an extent_map with a given offset. If + * it can't be found, try to find some neighboring extents + */ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, struct rb_node **prev_ret, struct rb_node **next_ret) @@ -160,6 +164,10 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, return NULL; } +/* + * look for an offset in the tree, and if it can't be found, return + * the first offset we can find smaller than 'offset'. + */ static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) { struct rb_node *prev; @@ -170,6 +178,7 @@ static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) return ret; } +/* check to see if two extent_map structs are adjacent and safe to merge */ static int mergable_maps(struct extent_map *prev, struct extent_map *next) { if (test_bit(EXTENT_FLAG_PINNED, &prev->flags)) @@ -250,6 +259,7 @@ out: } EXPORT_SYMBOL(add_extent_mapping); +/* simple helper to do math around the end of an extent, handling wrap */ static u64 range_end(u64 start, u64 len) { if (start + len < start) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 1b7e51a9db0..3088a118448 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -41,6 +41,9 @@ #include "compat.h" +/* simple helper to fault in pages and copy. This should go away + * and be replaced with calls into generic code. + */ static int noinline btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes, struct page **prepared_pages, @@ -72,12 +75,19 @@ static int noinline btrfs_copy_from_user(loff_t pos, int num_pages, return page_fault ? -EFAULT : 0; } +/* + * unlocks pages after btrfs_file_write is done with them + */ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) { size_t i; for (i = 0; i < num_pages; i++) { if (!pages[i]) break; + /* page checked is some magic around finding pages that + * have been modified without going through btrfs_set_page_dirty + * clear it here + */ ClearPageChecked(pages[i]); unlock_page(pages[i]); mark_page_accessed(pages[i]); @@ -85,6 +95,10 @@ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) } } +/* this does all the hard work for inserting an inline extent into + * the btree. 
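range_end() in the extent_map.c hunk above guards the end-of-extent computation against u64 wraparound. The same guard, standalone:

#include <stdint.h>

/* end of [start, start+len), saturating instead of wrapping:
 * extents with len = (u64)-1 must not produce a tiny bogus end */
static uint64_t range_end(uint64_t start, uint64_t len)
{
	if (start + len < start)	/* unsigned overflow check */
		return (uint64_t)-1;
	return start + len;
}

Without the check, a wrapped end offset would sort before its own start and break every interval comparison that follows.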
Any existing inline extent is extended as required to make room, + * otherwise things are inserted as required into the btree + */ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 offset, size_t size, @@ -228,6 +242,14 @@ fail: return err; } +/* + * after copy_from_user, pages need to be dirtied and we need to make + * sure holes are created between the current EOF and the start of + * any next extents (if required). + * + * this also makes the decision about creating an inline extent vs + * doing real data extents, marking pages dirty and delalloc as required. + */ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct file *file, @@ -362,6 +384,10 @@ out_unlock: return err; } +/* + * this drops all the extents in the cache that intersect the range + * [start, end]. Existing extents are split as required. + */ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned) { @@ -536,6 +562,9 @@ out: * If an extent intersects the range but is not entirely inside the range * it is either truncated or split. Anything entirely inside the range * is deleted from the tree. + * + * inline_limit is used to tell this code which offsets in the file to keep + * if they contain inline extents. */ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, @@ -796,7 +825,9 @@ out: } /* - * this gets pages into the page cache and locks them down + * this gets pages into the page cache and locks them down, it also properly + * waits for data=ordered extents to finish before allowing the pages to be + * modified. */ static int noinline prepare_pages(struct btrfs_root *root, struct file *file, struct page **pages, size_t num_pages, @@ -1034,6 +1065,17 @@ int btrfs_release_file(struct inode * inode, struct file * filp) return 0; } +/* + * fsync call for both files and directories. This logs the inode into + * the tree log instead of forcing full commits whenever possible. + * + * It needs to call filemap_fdatawait so that all ordered extent updates are + * in the metadata btree are up to date for copying to the log. + * + * It drops the inode mutex before doing the tree log commit. This is an + * important optimization for directories because holding the mutex prevents + * new operations on the dir while we write to disk. + */ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 404704d2682..f3abecc2d14 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -83,6 +83,10 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { static void btrfs_truncate(struct inode *inode); +/* + * a very lame attempt at stopping writes when the FS is 85% full. There + * are countless ways this is incorrect, but it is better than nothing. + */ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, int for_del) { @@ -108,6 +112,12 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, return ret; } +/* + * when extent_io.c finds a delayed allocation range in the file, + * the call backs end up in this code. The basic idea is to + * allocate extents on disk for the range, and create ordered data structs + * in ram to track those extents. 
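btrfs_check_free_space, introduced above as "a very lame attempt" at an 85%-full cutoff, boils down to comparing used-plus-requested bytes against a percentage of the device. A toy of that arithmetic, assuming the counters were sampled under the appropriate lock (the real function reads them from fs_info):

#include <stdint.h>

/* returns nonzero (model of -ENOSPC) when the allocation would push
 * usage past the threshold percentage of the device */
static int check_free_space(uint64_t bytes_used, uint64_t bytes_total,
			    uint64_t num_required, unsigned int pct)
{
	/* divide before multiplying so total * pct cannot overflow */
	uint64_t thresh = bytes_total / 100 * pct;

	return bytes_used + num_required > thresh;
}

For the 85% policy described above, a caller would pass pct = 85.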
+ */ static int cow_file_range(struct inode *inode, u64 start, u64 end) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -185,6 +195,13 @@ out: return ret; } +/* + * when nowcow writeback call back. This checks for snapshots or COW copies + * of the extents that exist in the file, and COWs the file as required. + * + * If no cow copies or snapshots exist, we write directly to the existing + * blocks on disk + */ static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end) { u64 extent_start; @@ -291,6 +308,9 @@ out: return err; } +/* + * extent_io.c call back to do delayed allocation processing + */ static int run_delalloc_range(struct inode *inode, u64 start, u64 end) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -305,6 +325,11 @@ static int run_delalloc_range(struct inode *inode, u64 start, u64 end) return ret; } +/* + * extent_io.c set_bit_hook, used to track delayed allocation + * bytes in this file, and to maintain the list of inodes that + * have pending delalloc work to be done. + */ int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits) { @@ -323,6 +348,9 @@ int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, return 0; } +/* + * extent_io.c clear_bit_hook, see set_bit_hook for why + */ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits) { @@ -349,6 +377,10 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, return 0; } +/* + * extent_io.c merge_bio_hook, this must check the chunk tree to make sure + * we don't create bios that span stripes or chunks + */ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, size_t size, struct bio *bio) { @@ -371,6 +403,14 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, return 0; } +/* + * in order to insert checksums into the metadata in large chunks, + * we wait until bio submission time. All the pages in the bio are + * checksummed and sums are attached onto the ordered extent record. + * + * At IO completion time the cums attached on the ordered extent record + * are inserted into the btree + */ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num) { @@ -383,6 +423,10 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, return btrfs_map_bio(root, rw, bio, mirror_num, 1); } +/* + * extent_io.c submission hook. This does the right thing for csum calculation on write, + * or reading the csums from the tree before a read + */ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num) { @@ -408,6 +452,10 @@ mapit: return btrfs_map_bio(root, rw, bio, mirror_num, 0); } +/* + * given a list of ordered sums record them in the inode. This happens + * at IO completion time based on sums calculated at bio submission time. 
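The set_bit/clear_bit hook comments above describe the delalloc accounting: every range that gains the DELALLOC bit adds its length to a running total, and every clear subtracts it. The bookkeeping is a symmetric add/subtract on the inclusive range; a sketch with invented types (the real hooks also check the old bit state so re-dirtying an already-delalloc range is not double counted, and they take delalloc_lock):

#include <stdint.h>

struct acct {
	uint64_t delalloc_bytes;	/* fs-wide pending delalloc */
};

/* called when [start, end] first gains the delalloc bit */
static void set_hook(struct acct *a, uint64_t start, uint64_t end)
{
	a->delalloc_bytes += end - start + 1;	/* range is inclusive */
}

/* called when the bit is cleared; must mirror set_hook exactly,
 * otherwise the counter drifts and stat reports garbage */
static void clear_hook(struct acct *a, uint64_t start, uint64_t end)
{
	a->delalloc_bytes -= end - start + 1;
}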
+ */ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, struct inode *inode, u64 file_offset, struct list_head *list) @@ -430,12 +478,12 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) GFP_NOFS); } +/* see btrfs_writepage_start_hook for details on why this is required */ struct btrfs_writepage_fixup { struct page *page; struct btrfs_work work; }; -/* see btrfs_writepage_start_hook for details on why this is required */ void btrfs_writepage_fixup_worker(struct btrfs_work *work) { struct btrfs_writepage_fixup *fixup; @@ -522,6 +570,10 @@ int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) return -EAGAIN; } +/* as ordered data IO finishes, this gets called so we can finish + * an ordered extent if the range of bytes in the file it covers are + * fully written. + */ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -631,6 +683,14 @@ int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, return btrfs_finish_ordered_io(page->mapping->host, start, end); } +/* + * When IO fails, either with EIO or csum verification fails, we + * try other mirrors that might have a good copy of the data. This + * io_failure_record is used to record state as we go through all the + * mirrors. If another mirror has good data, the page is set up to date + * and things continue. If a good mirror can't be found, the original + * bio end_io callback is called to indicate things have failed. + */ struct io_failure_record { struct page *page; u64 start; @@ -725,6 +785,10 @@ int btrfs_io_failed_hook(struct bio *failed_bio, return 0; } +/* + * each time an IO finishes, we do a fast check in the IO failure tree + * to see if we need to process or clean up an io_failure_record + */ int btrfs_clean_io_failures(struct inode *inode, u64 start) { u64 private; @@ -753,6 +817,11 @@ int btrfs_clean_io_failures(struct inode *inode, u64 start) return 0; } +/* + * when reads are done, we need to check csums to verify the data is correct + * if there's a match, we allow the bio to finish. If not, we go through + * the io_failure_record routines to find good copies + */ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state) { @@ -990,6 +1059,9 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) btrfs_free_path(path); } +/* + * read an inode from the btree into the in-memory inode + */ void btrfs_read_locked_inode(struct inode *inode) { struct btrfs_path *path; @@ -1083,6 +1155,9 @@ make_bad: make_bad_inode(inode); } +/* + * given a leaf and an inode, copy the inode fields into the leaf + */ static void fill_inode_item(struct btrfs_trans_handle *trans, struct extent_buffer *leaf, struct btrfs_inode_item *item, @@ -1118,6 +1193,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, BTRFS_I(inode)->block_group->key.objectid); } +/* + * copy everything in the in-memory inode into the btree. + */ int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) @@ -1151,6 +1229,11 @@ failed: } +/* + * unlink helper that gets used here in inode.c and in the tree logging + * recovery code. 
It remove a link in a directory with a given name, and + * also drops the back refs in the inode to the directory + */ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *dir, struct inode *inode, @@ -1309,7 +1392,7 @@ fail: /* * this can truncate away extent items, csum items and directory items. * It starts at a high offset and removes keys until it can't find - * any higher than i_size. + * any higher than new_size * * csum items that cross the new i_size are truncated to the new size * as well. @@ -2123,6 +2206,11 @@ void btrfs_dirty_inode(struct inode *inode) btrfs_end_transaction(trans, root); } +/* + * find the highest existing sequence number in a directory + * and then set the in-memory index_cnt variable to reflect + * free sequence numbers + */ static int btrfs_set_inode_index_count(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -2175,6 +2263,10 @@ out: return ret; } +/* + * helper to find a free sequence number in a given directory. This current + * code is very simple, later versions will do smarter things in the btree + */ static int btrfs_set_inode_index(struct inode *dir, struct inode *inode, u64 *index) { @@ -2305,6 +2397,12 @@ static inline u8 btrfs_inode_type(struct inode *inode) return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; } +/* + * utility function to add 'inode' into 'parent_inode' with + * a give name and a given sequence number. + * if 'add_backref' is true, also insert a backref from the + * inode to the parent directory. + */ int btrfs_add_link(struct btrfs_trans_handle *trans, struct inode *parent_inode, struct inode *inode, const char *name, int name_len, int add_backref, u64 index) @@ -2611,6 +2709,10 @@ out_unlock: return err; } +/* helper for btfs_get_extent. Given an existing extent in the tree, + * and an extent that you want to insert, deal with overlap and insert + * the new extent into the tree. + */ static int merge_extent_mapping(struct extent_map_tree *em_tree, struct extent_map *existing, struct extent_map *em, @@ -2627,6 +2729,14 @@ static int merge_extent_mapping(struct extent_map_tree *em_tree, return add_extent_mapping(em_tree, em); } +/* + * a bit scary, this does extent mapping from logical file offset to the disk. + * the ugly parts come from merging extents from the disk with the + * in-ram representation. This gets more complex because of the data=ordered code, + * where the in-ram extents might be locked pending data=ordered completion. + * + * This also copies inline extents directly into the page. 
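merge_extent_mapping, introduced above, handles a race in btrfs_get_extent: while one task was building a mapping, another may have cached an overlapping one, so the new mapping has to be trimmed before insertion. One plausible reduction of the trimming step (field names invented; the real helper also fixes up the on-disk block offset to match the new start):

#include <stdint.h>

struct map { uint64_t start, len; };

/* trim a freshly built mapping down to the window [map_start,
 * map_start + map_len) before inserting it, so it cannot collide
 * with what another thread cached while we were reading.
 * Assumes map_start falls inside the mapping being trimmed. */
static void clamp_mapping(struct map *em, uint64_t map_start, uint64_t map_len)
{
	uint64_t diff = map_start - em->start;
	uint64_t remaining = em->len - diff;

	em->start += diff;
	em->len = map_len < remaining ? map_len : remaining;
}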
+ */ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) @@ -2869,76 +2979,11 @@ out: return em; } -#if 0 /* waiting for O_DIRECT reads */ -static int btrfs_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) -{ - struct extent_map *em; - u64 start = (u64)iblock << inode->i_blkbits; - struct btrfs_multi_bio *multi = NULL; - struct btrfs_root *root = BTRFS_I(inode)->root; - u64 len; - u64 logical; - u64 map_length; - int ret = 0; - - em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0); - - if (!em || IS_ERR(em)) - goto out; - - if (em->start > start || em->start + em->len <= start) { - goto out; - } - - if (em->block_start == EXTENT_MAP_INLINE) { - ret = -EINVAL; - goto out; - } - - len = em->start + em->len - start; - len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size))); - - if (em->block_start == EXTENT_MAP_HOLE || - em->block_start == EXTENT_MAP_DELALLOC) { - bh_result->b_size = len; - goto out; - } - - logical = start - em->start; - logical = em->block_start + logical; - - map_length = len; - ret = btrfs_map_block(&root->fs_info->mapping_tree, READ, - logical, &map_length, &multi, 0); - BUG_ON(ret); - bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits; - bh_result->b_size = min(map_length, len); - - bh_result->b_bdev = multi->stripes[0].dev->bdev; - set_buffer_mapped(bh_result); - kfree(multi); -out: - free_extent_map(em); - return ret; -} -#endif - static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { return -EINVAL; -#if 0 - struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - - if (rw == WRITE) - return -EINVAL; - - return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, btrfs_get_block, NULL); -#endif } static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock) @@ -3202,6 +3247,9 @@ void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name, } } +/* + * create a new subvolume directory/inode (helper for the ioctl). + */ int btrfs_create_subvol_root(struct btrfs_root *new_root, struct btrfs_trans_handle *trans, u64 new_dirid, struct btrfs_block_group_cache *block_group) @@ -3223,6 +3271,9 @@ int btrfs_create_subvol_root(struct btrfs_root *new_root, return btrfs_update_inode(trans, new_root, inode); } +/* helper function for file defrag and space balancing. This + * forces readahead on a given range of bytes in an inode + */ unsigned long btrfs_force_ra(struct address_space *mapping, struct file_ra_state *ra, struct file *file, pgoff_t offset, pgoff_t last_index) @@ -3424,6 +3475,10 @@ out_unlock: return ret; } +/* + * some fairly slow code that needs optimization. This walks the list + * of all the inodes with pending delalloc and forces them to disk. + */ int btrfs_start_delalloc_inodes(struct btrfs_root *root) { struct list_head *head = &root->fs_info->delalloc_inodes; diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 0cc314c10d6..e30aa6e2958 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -25,6 +25,15 @@ #include "extent_io.h" #include "locking.h" +/* + * locks the per buffer mutex in an extent buffer. This uses adaptive locks + * and the spin is not tuned very extensively. The spinning does make a big + * difference in almost every workload, but spinning for the right amount of + * time needs some help. 
+ * + * In general, we want to spin as long as the lock holder is doing btree searches, + * and we should give up if they are in more expensive code. + */ int btrfs_tree_lock(struct extent_buffer *eb) { int i; @@ -57,6 +66,10 @@ int btrfs_tree_locked(struct extent_buffer *eb) return mutex_is_locked(&eb->mutex); } +/* + * btrfs_search_slot uses this to decide if it should drop its locks + * before doing something expensive like allocating free blocks for cow. + */ int btrfs_path_lock_waiting(struct btrfs_path *path, int level) { int i; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 951eacff242..dcc1730dd83 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -26,7 +26,6 @@ #include "btrfs_inode.h" #include "extent_io.h" - static u64 entry_end(struct btrfs_ordered_extent *entry) { if (entry->file_offset + entry->len < entry->file_offset) @@ -34,6 +33,9 @@ static u64 entry_end(struct btrfs_ordered_extent *entry) return entry->file_offset + entry->len; } +/* returns NULL if the insertion worked, or it returns the node it did find + * in the tree + */ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, struct rb_node *node) { @@ -58,6 +60,10 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, return NULL; } +/* + * look for a given offset in the tree, and if it can't be found return the + * first lesser offset + */ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, struct rb_node **prev_ret) { @@ -108,6 +114,9 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, return NULL; } +/* + * helper to check if a given offset is inside a given entry + */ static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) { if (file_offset < entry->file_offset || @@ -116,6 +125,10 @@ static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) return 1; } +/* + * look find the first ordered struct that has this offset, otherwise + * the first one less than this offset + */ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, u64 file_offset) { @@ -305,6 +318,10 @@ int btrfs_remove_ordered_extent(struct inode *inode, return 0; } +/* + * wait for all the ordered extents in a root. This is done when balancing + * space between drives. + */ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) { struct list_head splice; diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index 30fcb7aea5b..a50ebb67055 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c @@ -21,6 +21,16 @@ #include "ref-cache.h" #include "transaction.h" +/* + * leaf refs are used to cache the information about which extents + * a given leaf has references on. This allows us to process that leaf + * in btrfs_drop_snapshot without needing to read it back from disk. + */ + +/* + * kmalloc a leaf reference struct and update the counters for the + * total ref cache size + */ struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, int nr_extents) { @@ -40,6 +50,10 @@ struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, return ref; } +/* + * free a leaf reference struct and update the counters for the + * total ref cache size + */ void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) { if (!ref) @@ -135,6 +149,10 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, return 0; } +/* + * find the leaf ref for a given extent. 
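The locking.c comment above describes an adaptive lock: spin briefly on the assumption the holder is mid-search and about to release, then fall back to sleeping. A userspace analogue with pthreads (the kernel version spins on the mutex owner's state rather than yielding, and the spin count is an untuned knob, as the comment admits):

#include <pthread.h>
#include <sched.h>

#define SPIN_TRIES 64	/* tuning knob, deliberately arbitrary here */

/* adaptive lock: poll briefly in the hope the holder is about to
 * release, then give up and take the blocking path */
static void adaptive_lock(pthread_mutex_t *m)
{
	for (int i = 0; i < SPIN_TRIES; i++) {
		if (pthread_mutex_trylock(m) == 0)
			return;		/* got it while spinning */
		sched_yield();
	}
	pthread_mutex_lock(m);		/* holder is busy: sleep */
}

The payoff is the one the comment states: when hold times are shorter than a context switch, spinning wins in almost every workload.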
This returns the ref struct with + * a usage reference incremented + */ struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, u64 bytenr) { @@ -160,6 +178,10 @@ again: return NULL; } +/* + * add a fully filled in leaf ref struct + * remove all the refs older than a given root generation + */ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, int shared) { @@ -184,6 +206,10 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, return ret; } +/* + * remove a single leaf ref from the tree. This drops the ref held by the tree + * only + */ int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) { struct btrfs_leaf_ref_tree *tree; diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h index 617564787f5..16f3183d7c5 100644 --- a/fs/btrfs/ref-cache.h +++ b/fs/btrfs/ref-cache.h @@ -19,8 +19,11 @@ #define __REFCACHE__ struct btrfs_extent_info { + /* bytenr and num_bytes find the extent in the extent allocation tree */ u64 bytenr; u64 num_bytes; + + /* objectid and offset find the back reference for the file */ u64 objectid; u64 offset; }; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 0091c01abb0..eb7f7655e9d 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -22,8 +22,10 @@ #include "print-tree.h" /* - * returns 0 on finding something, 1 if no more roots are there - * and < 0 on error + * search forward for a root, starting with objectid 'search_start' + * if a root key is found, the objectid we find is filled into 'found_objectid' + * and 0 is returned. < 0 is returned on error, 1 if there is nothing + * left in the tree. */ int btrfs_search_root(struct btrfs_root *root, u64 search_start, u64 *found_objectid) @@ -66,6 +68,11 @@ out: return ret; } +/* + * lookup the root with the highest offset for a given objectid. The key we do + * find is copied into 'key'. If we find something return 0, otherwise 1, < 0 + * on error. + */ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct btrfs_root_item *item, struct btrfs_key *key) { @@ -104,6 +111,9 @@ out: return ret; } +/* + * copy the data in 'item' into the btree + */ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, struct btrfs_root_item *item) @@ -147,6 +157,12 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root return ret; } +/* + * at mount time we want to find all the old transaction snapshots that were in + * the process of being deleted if we crashed. This is any root item with an offset + * lower than the latest root. They need to be queued for deletion to finish + * what was happening when we crashed. + */ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid, struct btrfs_root *latest) { @@ -227,6 +243,7 @@ err: return ret; } +/* drop the root item for 'key' from 'root' */ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key) { diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index ad03a32d111..cdedbe144d4 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -17,6 +17,27 @@ */ #include + +/* this is some deeply nasty code. 
ctree.h has a different + * definition for this BTRFS_SETGET_FUNCS macro, behind a #ifndef + * + * The end result is that anyone who #includes ctree.h gets a + * declaration for the btrfs_set_foo functions and btrfs_foo functions + * + * This file declares the macros and then #includes ctree.h, which results + * in cpp creating the function here based on the template below. + * + * These setget functions do all the extent_buffer related mapping + * required to efficiently read and write specific fields in the extent + * buffers. Every pointer to metadata items in btrfs is really just + * an unsigned long offset into the extent buffer which has been + * cast to a specific type. This gives us all the gcc type checking. + * + * The extent buffer api is used to do all the kmapping and page + * spanning work required to get extent buffers in highmem and have + * a metadata blocksize different from the page size. + */ + #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ u##bits btrfs_##name(struct extent_buffer *eb, \ type *s) \ diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 8399d6d05d6..2e6039825b7 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -519,6 +519,9 @@ static struct file_system_type btrfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; +/* + * used by btrfsctl to scan devices when no FS is mounted + */ static long btrfs_control_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 444abe0796a..11266d68a6c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -46,6 +46,9 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) } } +/* + * either allocate a new transaction or hop into the existing one + */ static noinline int join_transaction(struct btrfs_root *root) { struct btrfs_transaction *cur_trans; @@ -85,6 +88,12 @@ static noinline int join_transaction(struct btrfs_root *root) return 0; } +/* + * this does all the record keeping required to make sure that a + * reference counted root is properly recorded in a given transaction. + * This is required to make sure the old root from before we joined the transaction + * is deleted when the transaction commits + */ noinline int btrfs_record_root_in_trans(struct btrfs_root *root) { struct btrfs_dirty_root *dirty; @@ -127,6 +136,10 @@ noinline int btrfs_record_root_in_trans(struct btrfs_root *root) return 0; } +/* wait for commit against the current transaction to become unblocked + * when this is done, it is safe to start a new transaction, but the current + * transaction might not be fully on disk. + */ static void wait_current_trans(struct btrfs_root *root) { struct btrfs_transaction *cur_trans; @@ -198,7 +211,7 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, return start_transaction(r, num_blocks, 2); } - +/* wait for a transaction commit to be fully complete */ static noinline int wait_for_commit(struct btrfs_root *root, struct btrfs_transaction *commit) { @@ -218,6 +231,10 @@ static noinline int wait_for_commit(struct btrfs_root *root, return 0; } +/* + * rate limit against the drop_snapshot code. This helps to slow down new operations + * if the drop_snapshot code isn't able to keep up. 
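+ *
+ * The core of the throttle is roughly (a condensed sketch; the real
+ * function below open-codes the wait and retries, and the fs_info
+ * field names are the ones used elsewhere in this tree):
+ *
+ *	thr = atomic_read(&info->throttle_gen);
+ *	if (atomic_read(&info->throttles))
+ *		wait_event(info->transaction_throttle,
+ *			   atomic_read(&info->throttle_gen) != thr);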
+ */ static void throttle_on_drops(struct btrfs_root *root) { struct btrfs_fs_info *info = root->fs_info; @@ -302,7 +319,11 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, return __btrfs_end_transaction(trans, root, 1); } - +/* + * when btree blocks are allocated, they have some corresponding bits set for + * them in one of two extent_io trees. This is used to make sure all of + * those extents are on disk for transaction or log commit + */ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages) { @@ -393,6 +414,16 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, &trans->transaction->dirty_pages); } +/* + * this is used to update the root pointer in the tree of tree roots. + * + * But, in the case of the extent allocation tree, updating the root + * pointer may allocate blocks which may change the root of the extent + * allocation tree. + * + * So, this loops and repeats and makes sure the cowonly root didn't + * change while the root pointer was being updated in the metadata. + */ static int update_cowonly_root(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -418,6 +449,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, return 0; } +/* + * update all the cowonly tree roots on disk + */ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -433,6 +467,11 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, return 0; } +/* + * dead roots are old snapshots that need to be deleted. This allocates + * a dirty root struct and adds it into the list of dead roots that need to + * be deleted + */ int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest) { struct btrfs_dirty_root *dirty; @@ -449,6 +488,12 @@ int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest) return 0; } +/* + * at transaction commit time we need to schedule the old roots for + * deletion via btrfs_drop_snapshot. This runs through all the + * reference counted roots that were modified in the current + * transaction and puts them into the drop list + */ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, struct radix_tree_root *radix, struct list_head *list) @@ -541,6 +586,10 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, return err; } +/* + * defrag a given btree. If cacheonly == 1, this won't read from the disk, + * otherwise every leaf in the btree is read and defragged. + */ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) { struct btrfs_fs_info *info = root->fs_info; @@ -570,6 +619,10 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) return 0; } +/* + * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on + * all of them + */ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, struct list_head *list) { @@ -664,6 +717,10 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, return ret; } +/* + * new snapshots need to be created at a very specific time in the + * transaction commit. 
This does the actual creation + */ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_pending_snapshot *pending) @@ -734,6 +791,9 @@ fail: return ret; } +/* + * create all the snapshots we've scheduled for creation + */ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { @@ -944,6 +1004,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, return ret; } +/* + * interface function to delete all the snapshots we have scheduled for deletion + */ int btrfs_clean_old_snapshots(struct btrfs_root *root) { struct list_head dirty_roots; diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index b3bb5bbad76..6f57d0889b1 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -23,6 +23,10 @@ #include "transaction.h" #include "locking.h" +/* defrag all the leaves in a given btree. If cache_only == 1, don't read things + * from disk, otherwise read all the leaves and try to get key order to + * better reflect disk order + */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, struct btrfs_root *root, int cache_only) { -- cgit v1.2.3-70-g09d2 From cb843a6f513a1a91c54951005e60bd9b95bdf973 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 3 Oct 2008 12:30:02 -0400 Subject: Btrfs: O_DIRECT writes via buffered writes + invalidate This reworks the btrfs O_DIRECT write code a bit. It had always fallen back to buffered IO and done an invalidate, but needed to be updated for the data=ordered code. The invalidate wasn't actually removing pages because they were still inside an ordered extent. This also combines the O_DIRECT/O_SYNC paths where possible, and kicks off IO in the main btrfs_file_write loop to keep the pipe to the disk full as we process long writes.
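In outline, the write path now looks like this (a condensed sketch of the file.c hunks below; error handling and the O_SYNC tree-log step are left out):

	/* inside the btrfs_file_write() loop, right after a batch of
	 * pages has been dirtied */
	if (will_write)
		btrfs_fdatawrite_range(inode->i_mapping, pos,
				       pos + write_bytes - 1, WB_SYNC_NONE);

	/* after the loop, for O_DIRECT writers: wait for the ordered
	 * extents covering what we wrote, then drop the page cache */
	btrfs_wait_ordered_range(inode, start_pos, num_written);
	invalidate_mapping_pages(inode->i_mapping,
				 start_pos >> PAGE_CACHE_SHIFT,
				 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);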
Signed-off-by: Chris Mason --- fs/btrfs/file.c | 64 +++++++++++++++++++++++++++---------------------- fs/btrfs/ordered-data.c | 3 ++- fs/btrfs/ordered-data.h | 2 +- 3 files changed, 38 insertions(+), 31 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 3088a118448..a03d1bbb19a 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -905,6 +905,10 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, struct page *pinned[2]; unsigned long first_index; unsigned long last_index; + int will_write; + + will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) || + (file->f_flags & O_DIRECT)); nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / (sizeof(struct page *))); @@ -1001,15 +1005,24 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, if (ret) goto out; + if (will_write) { + btrfs_fdatawrite_range(inode->i_mapping, pos, + pos + write_bytes - 1, + WB_SYNC_NONE); + } else { + balance_dirty_pages_ratelimited_nr(inode->i_mapping, + num_pages); + if (num_pages < + (root->leafsize >> PAGE_CACHE_SHIFT) + 1) + btrfs_btree_balance_dirty(root, 1); + btrfs_throttle(root); + } + buf += write_bytes; count -= write_bytes; pos += write_bytes; num_written += write_bytes; - balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages); - if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1) - btrfs_btree_balance_dirty(root, 1); - btrfs_throttle(root); cond_resched(); } out: @@ -1023,36 +1036,29 @@ out_nolock: page_cache_release(pinned[1]); *ppos = pos; - if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + if (num_written > 0 && will_write) { struct btrfs_trans_handle *trans; - err = btrfs_fdatawrite_range(inode->i_mapping, start_pos, - start_pos + num_written -1, - WB_SYNC_NONE); - if (err < 0) - num_written = err; - - err = btrfs_wait_on_page_writeback_range(inode->i_mapping, - start_pos, start_pos + num_written - 1); - if (err < 0) + err = btrfs_wait_ordered_range(inode, start_pos, num_written); + if (err) num_written = err; - trans = btrfs_start_transaction(root, 1); - ret = btrfs_log_dentry_safe(trans, root, file->f_dentry); - if (ret == 0) { - btrfs_sync_log(trans, root); - btrfs_end_transaction(trans, root); - } else { - btrfs_commit_transaction(trans, root); + if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) { + trans = btrfs_start_transaction(root, 1); + ret = btrfs_log_dentry_safe(trans, root, + file->f_dentry); + if (ret == 0) { + btrfs_sync_log(trans, root); + btrfs_end_transaction(trans, root); + } else { + btrfs_commit_transaction(trans, root); + } + } + if (file->f_flags & O_DIRECT) { + invalidate_mapping_pages(inode->i_mapping, + start_pos >> PAGE_CACHE_SHIFT, + (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); } - } else if (num_written > 0 && (file->f_flags & O_DIRECT)) { - do_sync_mapping_range(inode->i_mapping, start_pos, - start_pos + num_written - 1, - SYNC_FILE_RANGE_WRITE | - SYNC_FILE_RANGE_WAIT_AFTER); - invalidate_mapping_pages(inode->i_mapping, - start_pos >> PAGE_CACHE_SHIFT, - (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); } current->backing_dev_info = NULL; return num_written ? num_written : err; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index dcc1730dd83..2eb6caba57c 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -397,7 +397,7 @@ void btrfs_start_ordered_extent(struct inode *inode, /* * Used to wait on ordered extents across a large range of bytes. 
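 *
 * A typical caller is the O_DIRECT path in file.c above, which waits
 * on exactly the range it wrote before invalidating the page cache:
 *
 *	ret = btrfs_wait_ordered_range(inode, start_pos, num_written);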
*/ -void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) +int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) { u64 end; u64 orig_end; @@ -451,6 +451,7 @@ again: (unsigned long long)orig_end); goto again; } + return 0; } /* diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index fd45519f30a..f50f8870a14 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -135,7 +135,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, u64 file_offset); void btrfs_start_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry, int wait); -void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); +int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); int btrfs_ordered_update_i_size(struct inode *inode, -- cgit v1.2.3-70-g09d2 From c8b978188c9a0fd3d535c13debd19d522b726f1f Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 29 Oct 2008 14:49:59 -0400 Subject: Btrfs: Add zlib compression support This is a large change for adding compression on reading and writing, both for inline and regular extents. It does some fairly large surgery to the writeback paths. Compression is off by default and enabled by mount -o compress. Even when the -o compress mount option is not used, it is possible to read compressed extents off the disk. If compression for a given set of pages fails to make them smaller, the file is flagged to avoid future compression attempts. * While finding delalloc extents, the pages are locked before being sent down to the delalloc handler. This allows the delalloc handler to do complex things such as cleaning the pages, marking them writeback and starting IO on their behalf. * Inline extents are inserted at delalloc time now. This allows us to compress the data before inserting the inline extent, and it allows us to insert an inline extent that spans multiple pages. * All of the in-memory extent representations (extent_map.c, ordered-data.c etc) are changed to record both an in-memory size and an on disk size, as well as a flag for compression. From a disk format point of view, the extent pointers in the file are changed to record the on disk size of a given extent and some encoding flags. Space in the disk format is allocated for compression encoding, as well as encryption and a generic 'other' field. Neither the encryption nor the 'other' field is currently used. In order to limit the amount of data read for a single random read in the file, the size of a compressed extent is limited to 128k. This is a software only limit; the disk format supports u64 sized compressed extents. In order to limit the ram consumed while processing extents, the uncompressed size of a compressed extent is limited to 256k. This is a software only limit and will be subject to tuning later. Checksumming is still done on compressed extents, and it is done on the uncompressed version of the data. This way additional encodings can be layered on without having to figure out which encoding to checksum. Compression happens at delalloc time, which is basically single threaded because it is usually done by a single pdflush thread. This makes it tricky to spread the compression load across all the cpus on the box. We'll have to look at parallel pdflush walks of dirty inodes at a later time. Decompression is hooked into readpages and it does spread across CPUs nicely.
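In rough code terms the write-side policy is the following (a hedged sketch assembled from the helpers this patch adds; the real decision logic lives in inode.c's delalloc path and the local variable names here are illustrative):

	/* cap how much we feed to zlib in one go, and cap the
	 * compressed output at the 128k extent limit */
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
	ret = btrfs_zlib_compress_pages(inode->i_mapping, start, len,
					pages, nr_pages, &nr_pages_ret,
					&total_in, &total_compressed,
					128 * 1024);
	if (ret || total_compressed >= total_in) {
		/* it didn't get smaller: write this range uncompressed
		 * and remember not to bother with the file again */
		btrfs_set_flag(inode, NOCOMPRESS);
	}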
Signed-off-by: Chris Mason --- fs/Kconfig | 2 + fs/btrfs/Makefile | 3 +- fs/btrfs/compression.c | 454 ++++++++++++++++++++++++++++++++++ fs/btrfs/compression.h | 47 ++++ fs/btrfs/ctree.h | 99 ++++++-- fs/btrfs/disk-io.c | 18 +- fs/btrfs/disk-io.h | 1 + fs/btrfs/extent-tree.c | 27 +- fs/btrfs/extent_io.c | 411 ++++++++++++++++++++++++------- fs/btrfs/extent_io.h | 17 +- fs/btrfs/extent_map.c | 9 + fs/btrfs/extent_map.h | 6 +- fs/btrfs/file-item.c | 75 +++++- fs/btrfs/file.c | 263 +++++--------------- fs/btrfs/inode.c | 584 ++++++++++++++++++++++++++++++++++++++++---- fs/btrfs/ordered-data.c | 9 +- fs/btrfs/ordered-data.h | 10 +- fs/btrfs/print-tree.c | 7 +- fs/btrfs/super.c | 10 +- fs/btrfs/tree-log.c | 3 +- fs/btrfs/volumes.c | 2 + fs/btrfs/zlib.c | 637 ++++++++++++++++++++++++++++++++++++++++++++++++ 22 files changed, 2315 insertions(+), 379 deletions(-) create mode 100644 fs/btrfs/compression.c create mode 100644 fs/btrfs/compression.h create mode 100644 fs/btrfs/zlib.c (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/Kconfig b/fs/Kconfig index 18f5a85b47c..31cce5d88b1 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -501,6 +501,8 @@ config BTRFS_FS tristate "Btrfs filesystem (EXPERIMENTAL) Unstable disk format" depends on EXPERIMENTAL select LIBCRC32C + select ZLIB_INFLATE + select ZLIB_DEFLATE help Btrfs is a new filesystem with extents, writable snapshotting, support for multiple devices and many more features. diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 7125716e142..d2cf5a54a4b 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,7 +7,8 @@ btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ transaction.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ - ref-cache.o export.o tree-log.o acl.o free-space-cache.o + ref-cache.o export.o tree-log.o acl.o free-space-cache.o zlib.o \ + compression.o else # Normal Makefile diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c new file mode 100644 index 00000000000..c5470367ca5 --- /dev/null +++ b/fs/btrfs/compression.c @@ -0,0 +1,454 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ctree.h" +#include "disk-io.h" +#include "transaction.h" +#include "btrfs_inode.h" +#include "volumes.h" +#include "ordered-data.h" +#include "compat.h" +#include "compression.h" +#include "extent_io.h" +#include "extent_map.h" + +struct compressed_bio { + /* number of bios pending for this compressed extent */ + atomic_t pending_bios; + + /* the pages with the compressed data on them */ + struct page **compressed_pages; + + /* inode that owns this data */ + struct inode *inode; + + /* starting offset in the inode for our pages */ + u64 start; + + /* number of bytes in the inode we're working on */ + unsigned long len; + + /* number of bytes on disk */ + unsigned long compressed_len; + + /* number of compressed pages in the array */ + unsigned long nr_pages; + + /* IO errors */ + int errors; + + /* for reads, this is the bio we are copying the data into */ + struct bio *orig_bio; +}; + +static struct bio *compressed_bio_alloc(struct block_device *bdev, + u64 first_byte, gfp_t gfp_flags) +{ + struct bio *bio; + int nr_vecs; + + nr_vecs = bio_get_nr_vecs(bdev); + bio = bio_alloc(gfp_flags, nr_vecs); + + if (bio == NULL && (current->flags & PF_MEMALLOC)) { + while (!bio && (nr_vecs /= 2)) + bio = bio_alloc(gfp_flags, nr_vecs); + } + + if (bio) { + bio->bi_size = 0; + bio->bi_bdev = bdev; + bio->bi_sector = first_byte >> 9; + } + return bio; +} + +/* when we finish reading compressed pages from the disk, we + * decompress them and then run the bio end_io routines on the + * decompressed pages (in the inode address space). + * + * This allows the checksumming and other IO error handling routines + * to work normally + * + * The compressed pages are freed here, and it must be run + * in process context + */ +static void end_compressed_bio_read(struct bio *bio, int err) +{ + struct extent_io_tree *tree; + struct compressed_bio *cb = bio->bi_private; + struct inode *inode; + struct page *page; + unsigned long index; + int ret; + + if (err) + cb->errors = 1; + + /* if there are more bios still pending for this compressed + * extent, just exit + */ + if (!atomic_dec_and_test(&cb->pending_bios)) + goto out; + + /* ok, we're the last bio for this extent, lets start + * the decompression. 
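+ * (pending_bios is the usual "last one out does the work" pattern:
+ * every bio submitted for the extent holds a reference, and only the
+ * final atomic_dec_and_test() falls through to the decompress below)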
+ */ + inode = cb->inode; + tree = &BTRFS_I(inode)->io_tree; + ret = btrfs_zlib_decompress_biovec(cb->compressed_pages, + cb->start, + cb->orig_bio->bi_io_vec, + cb->orig_bio->bi_vcnt, + cb->compressed_len); + if (ret) + cb->errors = 1; + + /* release the compressed pages */ + index = 0; + for (index = 0; index < cb->nr_pages; index++) { + page = cb->compressed_pages[index]; + page->mapping = NULL; + page_cache_release(page); + } + + /* do io completion on the original bio */ + if (cb->errors) + bio_io_error(cb->orig_bio); + else + bio_endio(cb->orig_bio, 0); + + /* finally free the cb struct */ + kfree(cb->compressed_pages); + kfree(cb); +out: + bio_put(bio); +} + +/* + * Clear the writeback bits on all of the file + * pages for a compressed write + */ +static noinline int end_compressed_writeback(struct inode *inode, u64 start, + unsigned long ram_size) +{ + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT; + struct page *pages[16]; + unsigned long nr_pages = end_index - index + 1; + int i; + int ret; + + while(nr_pages > 0) { + ret = find_get_pages_contig(inode->i_mapping, index, + min(nr_pages, ARRAY_SIZE(pages)), pages); + if (ret == 0) { + nr_pages -= 1; + index += 1; + continue; + } + for (i = 0; i < ret; i++) { + end_page_writeback(pages[i]); + page_cache_release(pages[i]); + } + nr_pages -= ret; + index += ret; + } + /* the inode may be gone now */ + return 0; +} + +/* + * do the cleanup once all the compressed pages hit the disk. + * This will clear writeback on the file pages and free the compressed + * pages. + * + * This also calls the writeback end hooks for the file pages so that + * metadata and checksums can be updated in the file. + */ +static void end_compressed_bio_write(struct bio *bio, int err) +{ + struct extent_io_tree *tree; + struct compressed_bio *cb = bio->bi_private; + struct inode *inode; + struct page *page; + unsigned long index; + + if (err) + cb->errors = 1; + + /* if there are more bios still pending for this compressed + * extent, just exit + */ + if (!atomic_dec_and_test(&cb->pending_bios)) + goto out; + + /* ok, we're the last bio for this extent, step one is to + * call back into the FS and do all the end_io operations + */ + inode = cb->inode; + tree = &BTRFS_I(inode)->io_tree; + tree->ops->writepage_end_io_hook(cb->compressed_pages[0], + cb->start, + cb->start + cb->len - 1, + NULL, 1); + + end_compressed_writeback(inode, cb->start, cb->len); + /* note, our inode could be gone now */ + + /* + * release the compressed pages, these came from alloc_page and + * are not attached to the inode at all + */ + index = 0; + for (index = 0; index < cb->nr_pages; index++) { + page = cb->compressed_pages[index]; + page->mapping = NULL; + page_cache_release(page); + } + + /* finally free the cb struct */ + kfree(cb->compressed_pages); + kfree(cb); +out: + bio_put(bio); +} + +/* + * worker function to build and submit bios for previously compressed pages. + * The corresponding pages in the inode should be marked for writeback + * and the compressed pages should have a reference on them for dropping + * when the IO is complete. + * + * This also checksums the file bytes and gets things ready for + * the end io hooks. 
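+ *
+ * The expected call shape is roughly (a hedged sketch; the real
+ * caller is the compressed delalloc write-out path in inode.c):
+ *
+ *	btrfs_submit_compressed_write(inode, start, ram_len, disk_start,
+ *				      compressed_len, compressed_pages,
+ *				      nr_pages);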
+ */ +int btrfs_submit_compressed_write(struct inode *inode, u64 start, + unsigned long len, u64 disk_start, + unsigned long compressed_len, + struct page **compressed_pages, + unsigned long nr_pages) +{ + struct bio *bio = NULL; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct compressed_bio *cb; + unsigned long bytes_left; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + int page_index = 0; + struct page *page; + u64 first_byte = disk_start; + struct block_device *bdev; + int ret; + + WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); + cb = kmalloc(sizeof(*cb), GFP_NOFS); + atomic_set(&cb->pending_bios, 0); + cb->errors = 0; + cb->inode = inode; + cb->start = start; + cb->len = len; + cb->compressed_pages = compressed_pages; + cb->compressed_len = compressed_len; + cb->orig_bio = NULL; + cb->nr_pages = nr_pages; + + bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; + + ret = btrfs_csum_file_bytes(root, inode, start, len); + BUG_ON(ret); + + bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); + bio->bi_private = cb; + bio->bi_end_io = end_compressed_bio_write; + atomic_inc(&cb->pending_bios); + + /* create and submit bios for the compressed pages */ + bytes_left = compressed_len; + while(bytes_left > 0) { + page = compressed_pages[page_index]; + page->mapping = inode->i_mapping; + if (bio->bi_size) + ret = io_tree->ops->merge_bio_hook(page, 0, + PAGE_CACHE_SIZE, + bio, 0); + else + ret = 0; + + if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < + PAGE_CACHE_SIZE) { + bio_get(bio); + + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + BUG_ON(ret); + + ret = btrfs_map_bio(root, WRITE, bio, 0, 1); + BUG_ON(ret); + + bio_put(bio); + + bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); + atomic_inc(&cb->pending_bios); + bio->bi_private = cb; + bio->bi_end_io = end_compressed_bio_write; + bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + } + page_index++; + bytes_left -= PAGE_CACHE_SIZE; + first_byte += PAGE_CACHE_SIZE; + } + bio_get(bio); + + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + BUG_ON(ret); + + ret = btrfs_map_bio(root, WRITE, bio, 0, 1); + BUG_ON(ret); + + bio_put(bio); + return 0; +} + +/* + * for a compressed read, the bio we get passed has all the inode pages + * in it. We don't actually do IO on those pages but allocate new ones + * to hold the compressed pages on disk. 
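+ *
+ * The number of scratch pages allocated here is driven by the on-disk
+ * size rather than the file size, as computed in the body below:
+ *
+ *	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE;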
+ * + * bio->bi_sector points to the compressed extent on disk + * bio->bi_io_vec points to all of the inode pages + * bio->bi_vcnt is a count of pages + * + * After the compressed pages are read, we copy the bytes into the + * bio we were passed and then call the bio end_io calls + */ +int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, + int mirror_num, unsigned long bio_flags) +{ + struct extent_io_tree *tree; + struct extent_map_tree *em_tree; + struct compressed_bio *cb; + struct btrfs_root *root = BTRFS_I(inode)->root; + unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; + unsigned long compressed_len; + unsigned long nr_pages; + unsigned long page_index; + struct page *page; + struct block_device *bdev; + struct bio *comp_bio; + u64 cur_disk_byte = (u64)bio->bi_sector << 9; + struct extent_map *em; + int ret; + + tree = &BTRFS_I(inode)->io_tree; + em_tree = &BTRFS_I(inode)->extent_tree; + + /* we need the actual starting offset of this extent in the file */ + spin_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, + page_offset(bio->bi_io_vec->bv_page), + PAGE_CACHE_SIZE); + spin_unlock(&em_tree->lock); + + cb = kmalloc(sizeof(*cb), GFP_NOFS); + atomic_set(&cb->pending_bios, 0); + cb->errors = 0; + cb->inode = inode; + + cb->start = em->start; + compressed_len = em->block_len; + free_extent_map(em); + + cb->len = uncompressed_len; + cb->compressed_len = compressed_len; + cb->orig_bio = bio; + + nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE; + cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages, + GFP_NOFS); + bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; + + for (page_index = 0; page_index < nr_pages; page_index++) { + cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | + __GFP_HIGHMEM); + } + cb->nr_pages = nr_pages; + + comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); + comp_bio->bi_private = cb; + comp_bio->bi_end_io = end_compressed_bio_read; + atomic_inc(&cb->pending_bios); + + for (page_index = 0; page_index < nr_pages; page_index++) { + page = cb->compressed_pages[page_index]; + page->mapping = inode->i_mapping; + if (comp_bio->bi_size) + ret = tree->ops->merge_bio_hook(page, 0, + PAGE_CACHE_SIZE, + comp_bio, 0); + else + ret = 0; + + if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) < + PAGE_CACHE_SIZE) { + bio_get(comp_bio); + + ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); + BUG_ON(ret); + + ret = btrfs_map_bio(root, READ, comp_bio, 0, 0); + BUG_ON(ret); + + bio_put(comp_bio); + + comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, + GFP_NOFS); + atomic_inc(&cb->pending_bios); + bio->bi_private = cb; + bio->bi_end_io = end_compressed_bio_write; + bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + } + cur_disk_byte += PAGE_CACHE_SIZE; + } + bio_get(comp_bio); + + ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); + BUG_ON(ret); + + ret = btrfs_map_bio(root, READ, comp_bio, 0, 0); + BUG_ON(ret); + + bio_put(comp_bio); + return 0; +} diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h new file mode 100644 index 00000000000..421f5b4aa71 --- /dev/null +++ b/fs/btrfs/compression.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef __BTRFS_COMPRESSION_ +#define __BTRFS_COMPRESSION_ + +int btrfs_zlib_decompress(unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen); +int btrfs_zlib_compress_pages(struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out); +int btrfs_zlib_decompress_biovec(struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen); +void btrfs_zlib_exit(void); +int btrfs_submit_compressed_write(struct inode *inode, u64 start, + unsigned long len, u64 disk_start, + unsigned long compressed_len, + struct page **compressed_pages, + unsigned long nr_pages); +int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, + int mirror_num, unsigned long bio_flags); +#endif diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8559f39fd47..793d8fdda24 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -400,10 +400,18 @@ struct btrfs_timespec { __le32 nsec; } __attribute__ ((__packed__)); -/* - * there is no padding here on purpose. If you want to extent the inode, - * make a new item type - */ +typedef enum { + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_LAST = 2, +} btrfs_compression_type; + +/* we don't understand any encryption methods right now */ +typedef enum { + BTRFS_ENCRYPTION_NONE = 0, + BTRFS_ENCRYPTION_LAST = 1, +} btrfs_encryption_type; + struct btrfs_inode_item { /* nfs style generation number */ __le64 generation; @@ -419,6 +427,7 @@ struct btrfs_inode_item { __le64 rdev; __le16 flags; __le16 compat_flags; + struct btrfs_timespec atime; struct btrfs_timespec ctime; struct btrfs_timespec mtime; @@ -454,8 +463,33 @@ struct btrfs_root_item { #define BTRFS_FILE_EXTENT_INLINE 1 struct btrfs_file_extent_item { + /* + * transaction id that created this extent + */ __le64 generation; + /* + * max number of bytes to hold this extent in ram + * when we split a compressed extent we can't know how big + * each of the resulting pieces will be. So, this is + * an upper limit on the size of the extent in ram instead of + * an exact limit. + */ + __le64 ram_bytes; + + /* + * 32 bits for the various ways we might encode the data, + * including compression and encryption. If any of these + * are set to something a given disk format doesn't understand + * it is treated like an incompat flag for reading and writing, + * but not for stat. + */ + u8 compression; + u8 encryption; + __le16 other_encoding; /* spare for later use */ + + /* are we inline data or a real extent? */ u8 type; + /* * disk space consumed by the extent, checksum blocks are included * in these numbers @@ -471,9 +505,11 @@ struct btrfs_file_extent_item { */ __le64 offset; /* - * the logical number of file blocks (no csums included) + * the logical number of file blocks (no csums included). This + * always reflects the size uncompressed and without encoding. 
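+ *
+ * For example (illustrative numbers only): a 64k file extent that
+ * zlib shrank to 4k on disk would carry
+ *
+ *	ram_bytes      = 64k  (bytes needed to hold it uncompressed)
+ *	num_bytes      = 64k  (logical file bytes)
+ *	disk_num_bytes = 4k   (compressed bytes actually on disk)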
*/ __le64 num_bytes; + } __attribute__ ((__packed__)); struct btrfs_csum_item { @@ -814,6 +850,7 @@ struct btrfs_root { #define BTRFS_MOUNT_NOBARRIER (1 << 2) #define BTRFS_MOUNT_SSD (1 << 3) #define BTRFS_MOUNT_DEGRADED (1 << 4) +#define BTRFS_MOUNT_COMPRESS (1 << 5) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -825,6 +862,7 @@ struct btrfs_root { #define BTRFS_INODE_NODATASUM (1 << 0) #define BTRFS_INODE_NODATACOW (1 << 1) #define BTRFS_INODE_READONLY (1 << 2) +#define BTRFS_INODE_NOCOMPRESS (1 << 3) #define btrfs_clear_flag(inode, flag) (BTRFS_I(inode)->flags &= \ ~BTRFS_INODE_##flag) #define btrfs_set_flag(inode, flag) (BTRFS_I(inode)->flags |= \ @@ -1424,14 +1462,6 @@ static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) return offsetof(struct btrfs_file_extent_item, disk_bytenr) + datasize; } -static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, - struct btrfs_item *e) -{ - unsigned long offset; - offset = offsetof(struct btrfs_file_extent_item, disk_bytenr); - return btrfs_item_size(eb, e) - offset; -} - BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, disk_bytenr, 64); BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, @@ -1442,6 +1472,36 @@ BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, offset, 64); BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, num_bytes, 64); +BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item, + ram_bytes, 64); +BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item, + compression, 8); +BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item, + encryption, 8); +BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, + other_encoding, 16); + +/* this returns the number of file bytes represented by the inline item. + * If an item is compressed, this is the uncompressed size + */ +static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, + struct btrfs_file_extent_item *e) +{ + return btrfs_file_extent_ram_bytes(eb, e); +} + +/* + * this returns the number of bytes used by the item on disk, minus the + * size of any extent headers. 
If a file is compressed on disk, this is + * the compressed size + */ +static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, + struct btrfs_item *e) +{ + unsigned long offset; + offset = offsetof(struct btrfs_file_extent_item, disk_bytenr); + return btrfs_item_size(eb, e) - offset; +} static inline struct btrfs_root *btrfs_sb(struct super_block *sb) { @@ -1745,10 +1805,11 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 objectid, u64 pos, u64 disk_offset, - u64 disk_num_bytes, - u64 num_bytes, u64 offset); + struct btrfs_root *root, + u64 objectid, u64 pos, + u64 disk_offset, u64 disk_num_bytes, + u64 num_bytes, u64 offset, u64 ram_bytes, + u8 compression, u8 encryption, u16 other_encoding); int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, @@ -1758,6 +1819,8 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio); +int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, + u64 start, unsigned long len); struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -1799,7 +1862,7 @@ void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name, int namelen); int btrfs_merge_bio_hook(struct page *page, unsigned long offset, - size_t size, struct bio *bio); + size_t size, struct bio *bio, unsigned long bio_flags); unsigned long btrfs_force_ra(struct address_space *mapping, struct file_ra_state *ra, struct file *file, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0be044bb619..dc95f636a11 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -83,6 +83,7 @@ struct async_submit_bio { extent_submit_bio_hook_t *submit_bio_hook; int rw; int mirror_num; + unsigned long bio_flags; struct btrfs_work work; }; @@ -115,6 +116,7 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page, } em->start = 0; em->len = (u64)-1; + em->block_len = (u64)-1; em->block_start = 0; em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; @@ -469,12 +471,13 @@ static void run_one_async_submit(struct btrfs_work *work) wake_up(&fs_info->async_submit_wait); async->submit_bio_hook(async->inode, async->rw, async->bio, - async->mirror_num); + async->mirror_num, async->bio_flags); kfree(async); } int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, + unsigned long bio_flags, extent_submit_bio_hook_t *submit_bio_hook) { struct async_submit_bio *async; @@ -491,6 +494,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, async->submit_bio_hook = submit_bio_hook; async->work.func = run_one_async_submit; async->work.flags = 0; + async->bio_flags = bio_flags; while(atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->nr_async_submits)) { @@ -530,7 +534,7 @@ static int btree_csum_one_bio(struct bio *bio) } static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; @@ -556,17 +560,17 @@ static int 
__btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, } static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { /* * kthread helpers are used to submit writes so that checksumming * can happen in parallel across all CPUs */ if (!(rw & (1 << BIO_RW))) { - return __btree_submit_bio_hook(inode, rw, bio, mirror_num); + return __btree_submit_bio_hook(inode, rw, bio, mirror_num, 0); } return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, - inode, rw, bio, mirror_num, + inode, rw, bio, mirror_num, 0, __btree_submit_bio_hook); } @@ -1407,6 +1411,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->btree_inode = new_inode(sb); fs_info->btree_inode->i_ino = 1; fs_info->btree_inode->i_nlink = 1; + fs_info->thread_pool_size = min(num_online_cpus() + 2, 8); INIT_LIST_HEAD(&fs_info->ordered_extents); @@ -1508,6 +1513,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, */ btrfs_init_workers(&fs_info->workers, "worker", fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->submit_workers, "submit", min_t(u64, fs_devices->num_devices, fs_info->thread_pool_size)); @@ -1559,6 +1565,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, } fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); + fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, + 4 * 1024 * 1024 / PAGE_CACHE_SIZE); nodesize = btrfs_super_nodesize(disk_super); leafsize = btrfs_super_leafsize(disk_super); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index f84f5058dbb..4eb1f1408d2 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -71,6 +71,7 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, int metadata); int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, + unsigned long bio_flags, extent_submit_bio_hook_t *submit_bio_hook); int btrfs_congested_async(struct btrfs_fs_info *info, int iodone); unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 280ac1aa9b6..bbf04e80a1a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3278,6 +3278,7 @@ static int noinline relocate_data_extent(struct inode *reloc_inode, em->start = extent_key->objectid - offset; em->len = extent_key->offset; + em->block_len = extent_key->offset; em->block_start = extent_key->objectid; em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); @@ -3314,10 +3315,14 @@ struct btrfs_ref_path { }; struct disk_extent { + u64 ram_bytes; u64 disk_bytenr; u64 disk_num_bytes; u64 offset; u64 num_bytes; + u8 compression; + u8 encryption; + u16 other_encoding; }; static int is_cowonly_root(u64 root_objectid) @@ -3631,6 +3636,11 @@ static int noinline get_new_locations(struct inode *reloc_inode, btrfs_file_extent_disk_num_bytes(leaf, fi); exts[nr].offset = btrfs_file_extent_offset(leaf, fi); exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi); + exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); + exts[nr].compression = btrfs_file_extent_compression(leaf, fi); + exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); + exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, + fi); WARN_ON(exts[nr].offset > 0); WARN_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); @@ -3846,6 +3856,8 @@ next: new_extents[0].disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, 
new_extents[0].disk_num_bytes); + btrfs_set_file_extent_ram_bytes(leaf, fi, + new_extents[0].ram_bytes); ext_offset += new_extents[0].offset; btrfs_set_file_extent_offset(leaf, fi, ext_offset); btrfs_mark_buffer_dirty(leaf); @@ -3911,6 +3923,16 @@ next: new_extents[i].disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extents[i].disk_num_bytes); + btrfs_set_file_extent_ram_bytes(leaf, fi, + new_extents[i].ram_bytes); + + btrfs_set_file_extent_compression(leaf, fi, + new_extents[i].compression); + btrfs_set_file_extent_encryption(leaf, fi, + new_extents[i].encryption); + btrfs_set_file_extent_other_encoding(leaf, fi, + new_extents[i].other_encoding); + btrfs_set_file_extent_num_bytes(leaf, fi, extent_len); ext_offset += new_extents[i].offset; @@ -4169,6 +4191,8 @@ static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans, ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; btrfs_set_file_extent_generation(leaf, fi, trans->transid); + btrfs_set_file_extent_ram_bytes(leaf, fi, + new_extent->ram_bytes); btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extent->disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, @@ -4847,7 +4871,8 @@ static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info, BUG_ON(err); err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0, - group->key.offset, 0); + group->key.offset, 0, group->key.offset, + 0, 0, 0); BUG_ON(err); inode = btrfs_iget_locked(root->fs_info->sb, objectid, root); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 563b2d12f4f..314041fdfa4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -30,6 +30,7 @@ static struct kmem_cache *extent_buffer_cache; static LIST_HEAD(buffers); static LIST_HEAD(states); +#define LEAK_DEBUG 1 #ifdef LEAK_DEBUG static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED; #endif @@ -1067,8 +1068,8 @@ EXPORT_SYMBOL(find_first_extent_bit_state); * * 1 is returned if we find something, 0 if nothing was in the tree */ -static noinline u64 find_lock_delalloc_range(struct extent_io_tree *tree, - u64 *start, u64 *end, u64 max_bytes) +static noinline u64 find_delalloc_range(struct extent_io_tree *tree, + u64 *start, u64 *end, u64 max_bytes) { struct rb_node *node; struct extent_state *state; @@ -1077,11 +1078,11 @@ static noinline u64 find_lock_delalloc_range(struct extent_io_tree *tree, u64 total_bytes = 0; spin_lock_irq(&tree->lock); + /* * this search will find all the extents that end after * our range starts. 
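 * (tree_search() hands back the first extent_state that ends at or
 * after cur_start, and the loop that follows walks rb_next() for as
 * long as the states stay contiguous and delalloc)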
*/ -search_again: node = tree_search(tree, cur_start); if (!node) { if (!found) @@ -1100,40 +1101,6 @@ search_again: *end = state->end; goto out; } - if (!found && !(state->state & EXTENT_BOUNDARY)) { - struct extent_state *prev_state; - struct rb_node *prev_node = node; - while(1) { - prev_node = rb_prev(prev_node); - if (!prev_node) - break; - prev_state = rb_entry(prev_node, - struct extent_state, - rb_node); - if ((prev_state->end + 1 != state->start) || - !(prev_state->state & EXTENT_DELALLOC)) - break; - if ((cur_start - prev_state->start) * 2 > - max_bytes) - break; - state = prev_state; - node = prev_node; - } - } - if (state->state & EXTENT_LOCKED) { - DEFINE_WAIT(wait); - atomic_inc(&state->refs); - prepare_to_wait(&state->wq, &wait, - TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&tree->lock); - schedule(); - spin_lock_irq(&tree->lock); - finish_wait(&state->wq, &wait); - free_extent_state(state); - goto search_again; - } - set_state_cb(tree, state, EXTENT_LOCKED); - state->state |= EXTENT_LOCKED; if (!found) *start = state->start; found++; @@ -1151,6 +1118,208 @@ out: return found; } +static noinline int __unlock_for_delalloc(struct inode *inode, + struct page *locked_page, + u64 start, u64 end) +{ + int ret; + struct page *pages[16]; + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long nr_pages = end_index - index + 1; + int i; + + if (index == locked_page->index && end_index == index) + return 0; + + while(nr_pages > 0) { + ret = find_get_pages_contig(inode->i_mapping, index, + min(nr_pages, ARRAY_SIZE(pages)), pages); + for (i = 0; i < ret; i++) { + if (pages[i] != locked_page) + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + nr_pages -= ret; + index += ret; + cond_resched(); + } + return 0; +} + +static noinline int lock_delalloc_pages(struct inode *inode, + struct page *locked_page, + u64 delalloc_start, + u64 delalloc_end) +{ + unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT; + unsigned long start_index = index; + unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT; + unsigned long pages_locked = 0; + struct page *pages[16]; + unsigned long nrpages; + int ret; + int i; + + /* the caller is responsible for locking the start index */ + if (index == locked_page->index && index == end_index) + return 0; + + /* skip the page at the start index */ + nrpages = end_index - index + 1; + while(nrpages > 0) { + ret = find_get_pages_contig(inode->i_mapping, index, + min(nrpages, ARRAY_SIZE(pages)), pages); + if (ret == 0) { + ret = -EAGAIN; + goto done; + } + /* now we have an array of pages, lock them all */ + for (i = 0; i < ret; i++) { + /* + * the caller is taking responsibility for + * locked_page + */ + if (pages[i] != locked_page) + lock_page(pages[i]); + page_cache_release(pages[i]); + } + pages_locked += ret; + nrpages -= ret; + index += ret; + cond_resched(); + } + ret = 0; +done: + if (ret && pages_locked) { + __unlock_for_delalloc(inode, locked_page, + delalloc_start, + ((u64)(start_index + pages_locked - 1)) << + PAGE_CACHE_SHIFT); + } + return ret; +} + +/* + * find a contiguous range of bytes in the file marked as delalloc, not + * more than 'max_bytes'. 
start and end are used to return the range, + * + * 1 is returned if we find something, 0 if nothing was in the tree + */ +static noinline u64 find_lock_delalloc_range(struct inode *inode, + struct extent_io_tree *tree, + struct page *locked_page, + u64 *start, u64 *end, + u64 max_bytes) +{ + u64 delalloc_start; + u64 delalloc_end; + u64 found; + int ret; + int loops = 0; + +again: + /* step one, find a bunch of delalloc bytes starting at start */ + delalloc_start = *start; + delalloc_end = 0; + found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, + max_bytes); + if (!found) { + *start = delalloc_start; + *end = delalloc_end; + return found; + } + + /* + * make sure to limit the number of pages we try to lock down + * if we're looping. + */ + if (delalloc_end + 1 - delalloc_start > max_bytes && loops) { + delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) & + ~((u64)PAGE_CACHE_SIZE - 1); + } + /* step two, lock all the pages after the page that has start */ + ret = lock_delalloc_pages(inode, locked_page, + delalloc_start, delalloc_end); + if (ret == -EAGAIN) { + /* some of the pages are gone, lets avoid looping by + * shortening the size of the delalloc range we're searching + */ + if (!loops) { + unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1); + max_bytes = PAGE_CACHE_SIZE - offset; + loops = 1; + goto again; + } else { + found = 0; + goto out_failed; + } + } + BUG_ON(ret); + + /* step three, lock the state bits for the whole range */ + lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS); + + /* then test to make sure it is all still delalloc */ + ret = test_range_bit(tree, delalloc_start, delalloc_end, + EXTENT_DELALLOC, 1); + if (!ret) { + unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS); + __unlock_for_delalloc(inode, locked_page, + delalloc_start, delalloc_end); + cond_resched(); + goto again; + } + *start = delalloc_start; + *end = delalloc_end; +out_failed: + return found; +} + +int extent_clear_unlock_delalloc(struct inode *inode, + struct extent_io_tree *tree, + u64 start, u64 end, struct page *locked_page, + int clear_dirty, int set_writeback, + int end_writeback) +{ + int ret; + struct page *pages[16]; + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long nr_pages = end_index - index + 1; + int i; + int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC; + + if (clear_dirty) + clear_bits |= EXTENT_DIRTY; + + clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS); + + while(nr_pages > 0) { + ret = find_get_pages_contig(inode->i_mapping, index, + min(nr_pages, ARRAY_SIZE(pages)), pages); + for (i = 0; i < ret; i++) { + if (pages[i] == locked_page) { + page_cache_release(pages[i]); + continue; + } + if (clear_dirty) + clear_page_dirty_for_io(pages[i]); + if (set_writeback) + set_page_writeback(pages[i]); + if (end_writeback) + end_page_writeback(pages[i]); + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + nr_pages -= ret; + index += ret; + cond_resched(); + } + return 0; +} +EXPORT_SYMBOL(extent_clear_unlock_delalloc); + /* * count the number of bytes in the tree that have a given bit(s) * set. 
This can be fairly slow, except for EXTENT_DIRTY which is @@ -1631,38 +1800,26 @@ extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, return bio; } -static int submit_one_bio(int rw, struct bio *bio, int mirror_num) +static int submit_one_bio(int rw, struct bio *bio, int mirror_num, + unsigned long bio_flags) { int ret = 0; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct page *page = bvec->bv_page; struct extent_io_tree *tree = bio->bi_private; - struct rb_node *node; - struct extent_state *state; u64 start; u64 end; start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; end = start + bvec->bv_len - 1; - spin_lock_irq(&tree->lock); - node = __etree_search(tree, start, NULL, NULL); - BUG_ON(!node); - state = rb_entry(node, struct extent_state, rb_node); - while(state->end < end) { - node = rb_next(node); - state = rb_entry(node, struct extent_state, rb_node); - } - BUG_ON(state->end != end); - spin_unlock_irq(&tree->lock); - bio->bi_private = NULL; bio_get(bio); if (tree->ops && tree->ops->submit_bio_hook) tree->ops->submit_bio_hook(page->mapping->host, rw, bio, - mirror_num); + mirror_num, bio_flags); else submit_bio(rw, bio); if (bio_flagged(bio, BIO_EOPNOTSUPP)) @@ -1678,39 +1835,56 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, struct bio **bio_ret, unsigned long max_pages, bio_end_io_t end_io_func, - int mirror_num) + int mirror_num, + unsigned long prev_bio_flags, + unsigned long bio_flags) { int ret = 0; struct bio *bio; int nr; + int contig = 0; + int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED; + int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; + size_t page_size = min(size, PAGE_CACHE_SIZE); if (bio_ret && *bio_ret) { bio = *bio_ret; - if (bio->bi_sector + (bio->bi_size >> 9) != sector || + if (old_compressed) + contig = bio->bi_sector == sector; + else + contig = bio->bi_sector + (bio->bi_size >> 9) == + sector; + + if (prev_bio_flags != bio_flags || !contig || (tree->ops && tree->ops->merge_bio_hook && - tree->ops->merge_bio_hook(page, offset, size, bio)) || - bio_add_page(bio, page, size, offset) < size) { - ret = submit_one_bio(rw, bio, mirror_num); + tree->ops->merge_bio_hook(page, offset, page_size, bio, + bio_flags)) || + bio_add_page(bio, page, page_size, offset) < page_size) { + ret = submit_one_bio(rw, bio, mirror_num, + prev_bio_flags); bio = NULL; } else { return 0; } } - nr = bio_get_nr_vecs(bdev); + if (this_compressed) + nr = BIO_MAX_PAGES; + else + nr = bio_get_nr_vecs(bdev); + bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); if (!bio) { printk("failed to allocate bio nr %d\n", nr); } - - bio_add_page(bio, page, size, offset); + bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; bio->bi_private = tree; if (bio_ret) { *bio_ret = bio; } else { - ret = submit_one_bio(rw, bio, mirror_num); + ret = submit_one_bio(rw, bio, mirror_num, bio_flags); } return ret; @@ -1738,7 +1912,8 @@ void set_page_extent_head(struct page *page, unsigned long len) static int __extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, - struct bio **bio, int mirror_num) + struct bio **bio, int mirror_num, + unsigned long *bio_flags) { struct inode *inode = page->mapping->host; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; @@ -1756,13 +1931,27 @@ static int __extent_read_full_page(struct extent_io_tree *tree, int nr = 0; size_t page_offset = 0; size_t iosize; + size_t disk_io_size; size_t blocksize = 
inode->i_sb->s_blocksize; + unsigned long this_bio_flag = 0; set_page_extent_mapped(page); end = page_end; lock_extent(tree, start, end, GFP_NOFS); + if (page->index == last_byte >> PAGE_CACHE_SHIFT) { + char *userpage; + size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1); + + if (zero_offset) { + iosize = PAGE_CACHE_SIZE - zero_offset; + userpage = kmap_atomic(page, KM_USER0); + memset(userpage + zero_offset, 0, iosize); + flush_dcache_page(page); + kunmap_atomic(userpage, KM_USER0); + } + } while (cur <= end) { if (cur >= last_byte) { char *userpage; @@ -1793,10 +1982,19 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); } BUG_ON(end < cur); + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) + this_bio_flag = EXTENT_BIO_COMPRESSED; + iosize = min(extent_map_end(em) - cur, end - cur + 1); cur_end = min(extent_map_end(em) - 1, end); iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); - sector = (em->block_start + extent_offset) >> 9; + if (this_bio_flag & EXTENT_BIO_COMPRESSED) { + disk_io_size = em->block_len; + sector = em->block_start >> 9; + } else { + sector = (em->block_start + extent_offset) >> 9; + disk_io_size = iosize; + } bdev = em->bdev; block_start = em->block_start; free_extent_map(em); @@ -1845,10 +2043,13 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; pnr -= page->index; ret = submit_extent_page(READ, tree, page, - sector, iosize, page_offset, + sector, disk_io_size, page_offset, bdev, bio, pnr, - end_bio_extent_readpage, mirror_num); + end_bio_extent_readpage, mirror_num, + *bio_flags, + this_bio_flag); nr++; + *bio_flags = this_bio_flag; } if (ret) SetPageError(page); @@ -1867,11 +2068,13 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent) { struct bio *bio = NULL; + unsigned long bio_flags = 0; int ret; - ret = __extent_read_full_page(tree, page, get_extent, &bio, 0); + ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, + &bio_flags); if (bio) - submit_one_bio(READ, bio, 0); + submit_one_bio(READ, bio, 0, bio_flags); return ret; } EXPORT_SYMBOL(extent_read_full_page); @@ -1909,6 +2112,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; u64 nr_delalloc; u64 delalloc_end; + int page_started; + int compressed; WARN_ON(!PageLocked(page)); pg_offset = i_size & (PAGE_CACHE_SIZE - 1); @@ -1934,27 +2139,33 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, delalloc_start = start; delalloc_end = 0; + page_started = 0; while(delalloc_end < page_end) { - nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start, + nr_delalloc = find_lock_delalloc_range(inode, tree, + page, + &delalloc_start, &delalloc_end, 128 * 1024 * 1024); if (nr_delalloc == 0) { delalloc_start = delalloc_end + 1; continue; } - tree->ops->fill_delalloc(inode, delalloc_start, - delalloc_end); - clear_extent_bit(tree, delalloc_start, - delalloc_end, - EXTENT_LOCKED | EXTENT_DELALLOC, - 1, 0, GFP_NOFS); + tree->ops->fill_delalloc(inode, page, delalloc_start, + delalloc_end, &page_started); delalloc_start = delalloc_end + 1; } + + /* did the fill delalloc function already unlock and start the IO? 
*/ + if (page_started) { + return 0; + } + lock_extent(tree, start, page_end, GFP_NOFS); unlock_start = start; if (tree->ops && tree->ops->writepage_start_hook) { - ret = tree->ops->writepage_start_hook(page, start, page_end); + ret = tree->ops->writepage_start_hook(page, start, + page_end); if (ret == -EAGAIN) { unlock_extent(tree, start, page_end, GFP_NOFS); redirty_page_for_writepage(wbc, page); @@ -2006,10 +2217,15 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, sector = (em->block_start + extent_offset) >> 9; bdev = em->bdev; block_start = em->block_start; + compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); free_extent_map(em); em = NULL; - if (block_start == EXTENT_MAP_HOLE || + /* + * compressed and inline extents are written through other + * paths in the FS + */ + if (compressed || block_start == EXTENT_MAP_HOLE || block_start == EXTENT_MAP_INLINE) { clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); @@ -2017,16 +2233,28 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unlock_extent(tree, unlock_start, cur + iosize -1, GFP_NOFS); - if (tree->ops && tree->ops->writepage_end_io_hook) + /* + * end_io notification does not happen here for + * compressed extents + */ + if (!compressed && tree->ops && + tree->ops->writepage_end_io_hook) tree->ops->writepage_end_io_hook(page, cur, cur + iosize - 1, NULL, 1); - cur = cur + iosize; + else if (compressed) { + /* we don't want to end_page_writeback on + * a compressed extent. this happens + * elsewhere + */ + nr++; + } + + cur += iosize; pg_offset += iosize; unlock_start = cur; continue; } - /* leave this out until we have a page_mkwrite call */ if (0 && !test_range_bit(tree, cur, cur + iosize - 1, EXTENT_DIRTY, 0)) { @@ -2034,6 +2262,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, pg_offset += iosize; continue; } + clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); if (tree->ops && tree->ops->writepage_io_hook) { ret = tree->ops->writepage_io_hook(page, cur, @@ -2057,7 +2286,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ret = submit_extent_page(WRITE, tree, page, sector, iosize, pg_offset, bdev, &epd->bio, max_nr, - end_bio_extent_writepage, 0); + end_bio_extent_writepage, + 0, 0, 0); if (ret) SetPageError(page); } @@ -2226,7 +2456,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, extent_write_cache_pages(tree, mapping, &wbc_writepages, __extent_writepage, &epd); if (epd.bio) { - submit_one_bio(WRITE, epd.bio, 0); + submit_one_bio(WRITE, epd.bio, 0, 0); } return ret; } @@ -2248,7 +2478,7 @@ int extent_writepages(struct extent_io_tree *tree, ret = extent_write_cache_pages(tree, mapping, wbc, __extent_writepage, &epd); if (epd.bio) { - submit_one_bio(WRITE, epd.bio, 0); + submit_one_bio(WRITE, epd.bio, 0, 0); } return ret; } @@ -2262,6 +2492,7 @@ int extent_readpages(struct extent_io_tree *tree, struct bio *bio = NULL; unsigned page_idx; struct pagevec pvec; + unsigned long bio_flags = 0; pagevec_init(&pvec, 0); for (page_idx = 0; page_idx < nr_pages; page_idx++) { @@ -2281,7 +2512,7 @@ int extent_readpages(struct extent_io_tree *tree, if (!pagevec_add(&pvec, page)) __pagevec_lru_add(&pvec); __extent_read_full_page(tree, page, get_extent, - &bio, 0); + &bio, 0, &bio_flags); } page_cache_release(page); } @@ -2289,7 +2520,7 @@ int extent_readpages(struct extent_io_tree *tree, __pagevec_lru_add(&pvec); BUG_ON(!list_empty(pages)); if (bio) - 
submit_one_bio(READ, bio, 0); + submit_one_bio(READ, bio, 0, bio_flags); return 0; } EXPORT_SYMBOL(extent_readpages); @@ -2414,7 +2645,8 @@ int extent_prepare_write(struct extent_io_tree *tree, ret = submit_extent_page(READ, tree, page, sector, iosize, page_offset, em->bdev, NULL, 1, - end_bio_extent_preparewrite, 0); + end_bio_extent_preparewrite, 0, + 0, 0); iocount++; block_start = block_start + iosize; } else { @@ -2495,7 +2727,9 @@ int try_release_extent_mapping(struct extent_map_tree *map, } if (!test_range_bit(tree, em->start, extent_map_end(em) - 1, - EXTENT_LOCKED, 0)) { + EXTENT_LOCKED | EXTENT_WRITEBACK | + EXTENT_ORDERED, + 0)) { remove_extent_mapping(map, em); /* once for the rb tree */ free_extent_map(em); @@ -2923,6 +3157,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, int inc_all_pages = 0; unsigned long num_pages; struct bio *bio = NULL; + unsigned long bio_flags = 0; if (eb->flags & EXTENT_UPTODATE) return 0; @@ -2973,7 +3208,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, ClearPageError(page); err = __extent_read_full_page(tree, page, get_extent, &bio, - mirror_num); + mirror_num, &bio_flags); if (err) { ret = err; printk("err %d from __extent_read_full_page\n", ret); @@ -2984,7 +3219,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, } if (bio) - submit_one_bio(READ, bio, mirror_num); + submit_one_bio(READ, bio, mirror_num, bio_flags); if (ret || !wait) { if (ret) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index c9d1908a1ae..86f859b87a6 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -18,6 +18,9 @@ #define EXTENT_BOUNDARY (1 << 11) #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) +/* flags for bio submission */ +#define EXTENT_BIO_COMPRESSED 1 + /* * page->private values. Every page that is controlled by the extent * map has page->private set to one. 
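The contig test that submit_extent_page now applies is worth seeing in isolation: pages of one compressed extent all point at the bio's starting sector, because the whole extent is serviced by the same compressed disk bytes, while ordinary pages must extend the bio sector-contiguously, and a flag mismatch always forces a new bio. A compilable sketch (the struct is a toy stand-in for struct bio, not the block-layer type):

#include <stdint.h>
#include <stdio.h>

#define EXTENT_BIO_COMPRESSED 1

/* just the fields the merge decision reads */
struct bio_model {
	uint64_t sector;	/* first sector of the open bio */
	uint64_t size;		/* bytes queued so far */
	unsigned long flags;	/* prev_bio_flags */
};

static int can_merge(const struct bio_model *bio, uint64_t sector,
		     unsigned long flags)
{
	int contig;

	if (bio->flags & EXTENT_BIO_COMPRESSED)
		contig = (bio->sector == sector);
	else
		contig = (bio->sector + (bio->size >> 9) == sector);

	return contig && bio->flags == flags;
}

int main(void)
{
	struct bio_model open_bio = { .sector = 1000, .size = 4096, .flags = 0 };

	printf("%d\n", can_merge(&open_bio, 1008, 0));	/* 1: contiguous */
	printf("%d\n", can_merge(&open_bio, 1008,
				 EXTENT_BIO_COMPRESSED)); /* 0: flag change */
	return 0;
}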
@@ -28,14 +31,17 @@ struct extent_state; typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, - struct bio *bio, int mirror_num); + struct bio *bio, int mirror_num, + unsigned long bio_flags); struct extent_io_ops { - int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); + int (*fill_delalloc)(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started); int (*writepage_start_hook)(struct page *page, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); extent_submit_bio_hook_t *submit_bio_hook; int (*merge_bio_hook)(struct page *page, unsigned long offset, - size_t size, struct bio *bio); + size_t size, struct bio *bio, + unsigned long bio_flags); int (*readpage_io_hook)(struct page *page, u64 start, u64 end); int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, u64 start, u64 end, @@ -245,4 +251,9 @@ void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); int release_extent_buffer_tail_pages(struct extent_buffer *eb); int extent_range_uptodate(struct extent_io_tree *tree, u64 start, u64 end); +int extent_clear_unlock_delalloc(struct inode *inode, + struct extent_io_tree *tree, + u64 start, u64 end, struct page *locked_page, + int clear_dirty, int set_writeback, + int clear_writeback); #endif diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 74b2a29880d..fd3ebfb8c3c 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -184,6 +184,13 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next) if (test_bit(EXTENT_FLAG_PINNED, &prev->flags)) return 0; + /* + * don't merge compressed extents, we need to know their + * actual size + */ + if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags)) + return 0; + if (extent_map_end(prev) == next->start && prev->flags == next->flags && prev->bdev == next->bdev && @@ -239,6 +246,7 @@ int add_extent_mapping(struct extent_map_tree *tree, if (rb && mergable_maps(merge, em)) { em->start = merge->start; em->len += merge->len; + em->block_len += merge->block_len; em->block_start = merge->block_start; merge->in_tree = 0; rb_erase(&merge->rb_node, &tree->map); @@ -250,6 +258,7 @@ int add_extent_mapping(struct extent_map_tree *tree, merge = rb_entry(rb, struct extent_map, rb_node); if (rb && mergable_maps(em, merge)) { em->len += merge->len; + em->block_len += merge->len; rb_erase(&merge->rb_node, &tree->map); merge->in_tree = 0; free_extent_map(merge); diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 26ac6fe0b26..abbcbeb28c7 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -10,6 +10,7 @@ /* bits for the flags field */ #define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */ +#define EXTENT_FLAG_COMPRESSED 1 struct extent_map { struct rb_node rb_node; @@ -18,6 +19,7 @@ struct extent_map { u64 start; u64 len; u64 block_start; + u64 block_len; unsigned long flags; struct block_device *bdev; atomic_t refs; @@ -38,9 +40,9 @@ static inline u64 extent_map_end(struct extent_map *em) static inline u64 extent_map_block_end(struct extent_map *em) { - if (em->block_start + em->len < em->block_start) + if (em->block_start + em->block_len < em->block_start) return (u64)-1; - return em->block_start + em->len; + return em->block_start + em->block_len; } void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 6dbe88b9d7d..f4d3fa71bc4 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ 
-31,7 +31,8 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, u64 disk_offset, u64 disk_num_bytes, - u64 num_bytes, u64 offset) + u64 num_bytes, u64 offset, u64 ram_bytes, + u8 compression, u8 encryption, u16 other_encoding) { int ret = 0; struct btrfs_file_extent_item *item; @@ -57,8 +58,13 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes); btrfs_set_file_extent_offset(leaf, item, offset); btrfs_set_file_extent_num_bytes(leaf, item, num_bytes); + btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes); btrfs_set_file_extent_generation(leaf, item, trans->transid); btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); + btrfs_set_file_extent_compression(leaf, item, compression); + btrfs_set_file_extent_encryption(leaf, item, encryption); + btrfs_set_file_extent_other_encoding(leaf, item, other_encoding); + btrfs_mark_buffer_dirty(leaf); out: btrfs_free_path(path); @@ -213,6 +219,73 @@ found: return 0; } +int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, + u64 start, unsigned long len) +{ + struct btrfs_ordered_sum *sums; + struct btrfs_sector_sum *sector_sum; + struct btrfs_ordered_extent *ordered; + char *data; + struct page *page; + unsigned long total_bytes = 0; + unsigned long this_sum_bytes = 0; + + sums = kzalloc(btrfs_ordered_sum_size(root, len), GFP_NOFS); + if (!sums) + return -ENOMEM; + + sector_sum = sums->sums; + sums->file_offset = start; + sums->len = len; + INIT_LIST_HEAD(&sums->list); + ordered = btrfs_lookup_ordered_extent(inode, sums->file_offset); + BUG_ON(!ordered); + + while(len > 0) { + if (start >= ordered->file_offset + ordered->len || + start < ordered->file_offset) { + sums->len = this_sum_bytes; + this_sum_bytes = 0; + btrfs_add_ordered_sum(inode, ordered, sums); + btrfs_put_ordered_extent(ordered); + + sums = kzalloc(btrfs_ordered_sum_size(root, len), + GFP_NOFS); + BUG_ON(!sums); + sector_sum = sums->sums; + sums->len = len; + sums->file_offset = start; + ordered = btrfs_lookup_ordered_extent(inode, + sums->file_offset); + BUG_ON(!ordered); + } + + page = find_get_page(inode->i_mapping, + start >> PAGE_CACHE_SHIFT); + + data = kmap_atomic(page, KM_USER0); + sector_sum->sum = ~(u32)0; + sector_sum->sum = btrfs_csum_data(root, data, sector_sum->sum, + PAGE_CACHE_SIZE); + kunmap_atomic(data, KM_USER0); + btrfs_csum_final(sector_sum->sum, + (char *)&sector_sum->sum); + sector_sum->offset = page_offset(page); + page_cache_release(page); + + sector_sum++; + total_bytes += PAGE_CACHE_SIZE; + this_sum_bytes += PAGE_CACHE_SIZE; + start += PAGE_CACHE_SIZE; + + WARN_ON(len < PAGE_CACHE_SIZE); + len -= PAGE_CACHE_SIZE; + } + btrfs_add_ordered_sum(inode, ordered, sums); + btrfs_put_ordered_extent(ordered); + return 0; +} + int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio) { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 69abbe19add..0aa15436590 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -95,153 +95,6 @@ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) } } -/* this does all the hard work for inserting an inline extent into - * the btree. 
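The widened btrfs_insert_file_extent signature is mechanical for the common case; a hedged call sketch (file_pos, disk_bytenr, disk_num_bytes and num_bytes are placeholders for whatever the caller has in hand), matching the shape of the hole-insertion call sites updated later in this patch:

/* sketch: inserting a plain BTRFS_FILE_EXTENT_REG item; for
 * uncompressed data ram_bytes simply equals num_bytes and the three
 * encoding fields are zero */
ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
			       file_pos,	/* pos */
			       disk_bytenr,	/* disk_offset */
			       disk_num_bytes,	/* bytes allocated on disk */
			       num_bytes,	/* bytes in the file */
			       0,		/* offset into the extent */
			       num_bytes,	/* ram_bytes */
			       0, 0, 0);	/* compression, encryption,
						 * other_encoding */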
Any existing inline extent is extended as required to make room, - * otherwise things are inserted as required into the btree - */ -static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, - u64 offset, size_t size, - struct page **pages, size_t page_offset, - int num_pages) -{ - struct btrfs_key key; - struct btrfs_path *path; - struct extent_buffer *leaf; - char *kaddr; - unsigned long ptr; - struct btrfs_file_extent_item *ei; - struct page *page; - u32 datasize; - int err = 0; - int ret; - int i; - ssize_t cur_size; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - btrfs_set_trans_block_group(trans, inode); - - key.objectid = inode->i_ino; - key.offset = offset; - btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); - - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); - if (ret < 0) { - err = ret; - goto fail; - } - if (ret == 1) { - struct btrfs_key found_key; - - if (path->slots[0] == 0) - goto insert; - - path->slots[0]--; - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - - if (found_key.objectid != inode->i_ino) - goto insert; - - if (found_key.type != BTRFS_EXTENT_DATA_KEY) - goto insert; - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - - if (btrfs_file_extent_type(leaf, ei) != - BTRFS_FILE_EXTENT_INLINE) { - goto insert; - } - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - ret = 0; - } - if (ret == 0) { - u32 found_size; - u64 found_end; - - leaf = path->nodes[0]; - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - - if (btrfs_file_extent_type(leaf, ei) != - BTRFS_FILE_EXTENT_INLINE) { - err = ret; - btrfs_print_leaf(root, leaf); - printk("found wasn't inline offset %Lu inode %lu\n", - offset, inode->i_ino); - goto fail; - } - found_size = btrfs_file_extent_inline_len(leaf, - btrfs_item_nr(leaf, path->slots[0])); - found_end = key.offset + found_size; - - if (found_end < offset + size) { - btrfs_release_path(root, path); - ret = btrfs_search_slot(trans, root, &key, path, - offset + size - found_end, 1); - BUG_ON(ret != 0); - - ret = btrfs_extend_item(trans, root, path, - offset + size - found_end); - if (ret) { - err = ret; - goto fail; - } - leaf = path->nodes[0]; - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - inode_add_bytes(inode, offset + size - found_end); - } - if (found_end < offset) { - ptr = btrfs_file_extent_inline_start(ei) + found_size; - memset_extent_buffer(leaf, 0, ptr, offset - found_end); - } - } else { -insert: - btrfs_release_path(root, path); - datasize = offset + size - key.offset; - inode_add_bytes(inode, datasize); - datasize = btrfs_file_extent_calc_inline_size(datasize); - ret = btrfs_insert_empty_item(trans, root, path, &key, - datasize); - if (ret) { - err = ret; - printk("got bad ret %d\n", ret); - goto fail; - } - leaf = path->nodes[0]; - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_generation(leaf, ei, trans->transid); - btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); - } - ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset; - - cur_size = size; - i = 0; - while (size > 0) { - page = pages[i]; - kaddr = kmap_atomic(page, KM_USER0); - cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size); - write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size); - kunmap_atomic(kaddr, KM_USER0); - page_offset = 0; - ptr += cur_size; - size -= cur_size; - if 
(i >= num_pages) { - printk("i %d num_pages %d\n", i, num_pages); - } - i++; - } - btrfs_mark_buffer_dirty(leaf); -fail: - btrfs_free_path(path); - return err; -} - /* * after copy_from_user, pages need to be dirtied and we need to make * sure holes are created between the current EOF and the start of @@ -267,8 +120,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, u64 start_pos; u64 end_of_last_block; u64 end_pos = pos + write_bytes; - u64 inline_size; - int did_inline = 0; loff_t isize = i_size_read(inode); start_pos = pos & ~((u64)root->sectorsize - 1); @@ -314,7 +165,8 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, err = btrfs_insert_file_extent(trans, root, inode->i_ino, last_pos_in_file, - 0, 0, hole_size, 0); + 0, 0, hole_size, 0, + hole_size, 0, 0, 0); btrfs_drop_extent_cache(inode, last_pos_in_file, last_pos_in_file + hole_size - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); @@ -324,57 +176,19 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, goto failed; } - /* - * either allocate an extent for the new bytes or setup the key - * to show we are doing inline data in the extent + /* check for reserved extents on each page, we don't want + * to reset the delalloc bit on things that already have + * extents reserved. */ - inline_size = end_pos; - if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) || - inline_size > root->fs_info->max_inline || - (inline_size & (root->sectorsize -1)) == 0 || - inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) { - /* check for reserved extents on each page, we don't want - * to reset the delalloc bit on things that already have - * extents reserved. - */ - btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); - for (i = 0; i < num_pages; i++) { - struct page *p = pages[i]; - SetPageUptodate(p); - ClearPageChecked(p); - set_page_dirty(p); - } - } else { - u64 aligned_end; - /* step one, delete the existing extents in this range */ - aligned_end = (pos + write_bytes + root->sectorsize - 1) & - ~((u64)root->sectorsize - 1); - mutex_lock(&BTRFS_I(inode)->extent_mutex); - err = btrfs_drop_extents(trans, root, inode, start_pos, - aligned_end, aligned_end, &hint_byte); - if (err) - goto failed; - if (isize > inline_size) - inline_size = min_t(u64, isize, aligned_end); - inline_size -= start_pos; - err = insert_inline_extent(trans, root, inode, start_pos, - inline_size, pages, 0, num_pages); - btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1, 0); - BUG_ON(err); - mutex_unlock(&BTRFS_I(inode)->extent_mutex); - - /* - * an ugly way to do all the prop accounting around - * the page bits and mapping tags - */ - set_page_writeback(pages[0]); - end_page_writeback(pages[0]); - did_inline = 1; + btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); + for (i = 0; i < num_pages; i++) { + struct page *p = pages[i]; + SetPageUptodate(p); + ClearPageChecked(p); + set_page_dirty(p); } if (end_pos > isize) { i_size_write(inode, end_pos); - if (did_inline) - BTRFS_I(inode)->disk_i_size = end_pos; btrfs_update_inode(trans, root, inode); } failed: @@ -399,6 +213,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int ret; int testend = 1; unsigned long flags; + int compressed = 0; WARN_ON(end < start); if (end == (u64)-1) { @@ -434,6 +249,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, free_extent_map(em); continue; } + compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); clear_bit(EXTENT_FLAG_PINNED, 
&em->flags); remove_extent_mapping(em_tree, em); @@ -442,6 +258,12 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->start = em->start; split->len = start - em->start; split->block_start = em->block_start; + + if (compressed) + split->block_len = em->block_len; + else + split->block_len = split->len; + split->bdev = em->bdev; split->flags = flags; ret = add_extent_mapping(em_tree, split); @@ -459,7 +281,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->bdev = em->bdev; split->flags = flags; - split->block_start = em->block_start + diff; + if (compressed) { + split->block_len = em->block_len; + split->block_start = em->block_start; + } else { + split->block_len = split->len; + split->block_start = em->block_start + diff; + } ret = add_extent_mapping(em_tree, split); BUG_ON(ret); @@ -533,7 +361,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode) struct btrfs_item *item; item = btrfs_item_nr(leaf, slot); extent_end = found_key.offset + - btrfs_file_extent_inline_len(leaf, item); + btrfs_file_extent_inline_len(leaf, extent); extent_end = (extent_end + root->sectorsize - 1) & ~((u64)root->sectorsize -1 ); } @@ -573,6 +401,10 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, u64 extent_end = 0; u64 search_start = start; u64 leaf_start; + u64 ram_bytes = 0; + u8 compression = 0; + u8 encryption = 0; + u16 other_encoding = 0; u64 root_gen; u64 root_owner; struct extent_buffer *leaf; @@ -589,6 +421,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, int recow; int ret; + inline_limit = 0; btrfs_drop_extent_cache(inode, start, end - 1, 0); path = btrfs_alloc_path(); @@ -637,6 +470,12 @@ next_slot: extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(leaf, extent); + compression = btrfs_file_extent_compression(leaf, + extent); + encryption = btrfs_file_extent_encryption(leaf, + extent); + other_encoding = btrfs_file_extent_other_encoding(leaf, + extent); if (found_type == BTRFS_FILE_EXTENT_REG) { extent_end = btrfs_file_extent_disk_bytenr(leaf, @@ -646,13 +485,13 @@ next_slot: extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, extent); + ram_bytes = btrfs_file_extent_ram_bytes(leaf, + extent); found_extent = 1; } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { - struct btrfs_item *item; - item = btrfs_item_nr(leaf, slot); found_inline = 1; extent_end = key.offset + - btrfs_file_extent_inline_len(leaf, item); + btrfs_file_extent_inline_len(leaf, extent); } } else { extent_end = search_start; @@ -680,10 +519,9 @@ next_slot: search_start = (extent_end + mask) & ~mask; } else search_start = extent_end; - if (end <= extent_end && start >= key.offset && found_inline) { + + if (end <= extent_end && start >= key.offset && found_inline) *hint_byte = EXTENT_MAP_INLINE; - goto out; - } if (found_extent) { read_extent_buffer(leaf, &old, (unsigned long)extent, @@ -770,12 +608,27 @@ next_slot: write_extent_buffer(leaf, &old, (unsigned long)extent, sizeof(old)); + btrfs_set_file_extent_compression(leaf, extent, + compression); + btrfs_set_file_extent_encryption(leaf, extent, + encryption); + btrfs_set_file_extent_other_encoding(leaf, extent, + other_encoding); btrfs_set_file_extent_offset(leaf, extent, le64_to_cpu(old.offset) + end - key.offset); WARN_ON(le64_to_cpu(old.num_bytes) < (extent_end - end)); btrfs_set_file_extent_num_bytes(leaf, extent, extent_end - end); + + /* + * set the ram bytes to the size of the full extent + * before 
splitting. This is a worst case flag, + * but its the best we can do because we don't know + * how splitting affects compression + */ + btrfs_set_file_extent_ram_bytes(leaf, extent, + ram_bytes); btrfs_set_file_extent_type(leaf, extent, BTRFS_FILE_EXTENT_REG); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bf4bed6ca4d..9797592dc86 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -49,6 +49,7 @@ #include "compat.h" #include "tree-log.h" #include "ref-cache.h" +#include "compression.h" struct btrfs_iget_args { u64 ino; @@ -83,6 +84,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { }; static void btrfs_truncate(struct inode *inode); +static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); /* * a very lame attempt at stopping writes when the FS is 85% full. There @@ -113,58 +115,375 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, return ret; } +/* + * this does all the hard work for inserting an inline extent into + * the btree. The caller should have done a btrfs_drop_extents so that + * no overlapping inline items exist in the btree + */ +static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode, + u64 start, size_t size, size_t compressed_size, + struct page **compressed_pages) +{ + struct btrfs_key key; + struct btrfs_path *path; + struct extent_buffer *leaf; + struct page *page = NULL; + char *kaddr; + unsigned long ptr; + struct btrfs_file_extent_item *ei; + int err = 0; + int ret; + size_t cur_size = size; + size_t datasize; + unsigned long offset; + int use_compress = 0; + + if (compressed_size && compressed_pages) { + use_compress = 1; + cur_size = compressed_size; + } + + path = btrfs_alloc_path(); if (!path) + return -ENOMEM; + + btrfs_set_trans_block_group(trans, inode); + + key.objectid = inode->i_ino; + key.offset = start; + btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); + inode_add_bytes(inode, size); + datasize = btrfs_file_extent_calc_inline_size(cur_size); + + inode_add_bytes(inode, size); + ret = btrfs_insert_empty_item(trans, root, path, &key, + datasize); + BUG_ON(ret); + if (ret) { + err = ret; + printk("got bad ret %d\n", ret); + goto fail; + } + leaf = path->nodes[0]; + ei = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_generation(leaf, ei, trans->transid); + btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); + btrfs_set_file_extent_encryption(leaf, ei, 0); + btrfs_set_file_extent_other_encoding(leaf, ei, 0); + btrfs_set_file_extent_ram_bytes(leaf, ei, size); + ptr = btrfs_file_extent_inline_start(ei); + + if (use_compress) { + struct page *cpage; + int i = 0; + while(compressed_size > 0) { + cpage = compressed_pages[i]; + cur_size = min(compressed_size, + PAGE_CACHE_SIZE); + + kaddr = kmap(cpage); + write_extent_buffer(leaf, kaddr, ptr, cur_size); + kunmap(cpage); + + i++; + ptr += cur_size; + compressed_size -= cur_size; + } + btrfs_set_file_extent_compression(leaf, ei, + BTRFS_COMPRESS_ZLIB); + } else { + page = find_get_page(inode->i_mapping, + start >> PAGE_CACHE_SHIFT); + btrfs_set_file_extent_compression(leaf, ei, 0); + kaddr = kmap_atomic(page, KM_USER0); + offset = start & (PAGE_CACHE_SIZE - 1); + write_extent_buffer(leaf, kaddr + offset, ptr, size); + kunmap_atomic(kaddr, KM_USER0); + page_cache_release(page); + } + btrfs_mark_buffer_dirty(leaf); + btrfs_free_path(path); + + BTRFS_I(inode)->disk_i_size = inode->i_size; + btrfs_update_inode(trans, root, 
inode); + return 0; +fail: + btrfs_free_path(path); + return err; +} + + +/* + * conditionally insert an inline extent into the file. This + * does the checks required to make sure the data is small enough + * to fit as an inline extent. + */ +static int cow_file_range_inline(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *inode, u64 start, u64 end, + size_t compressed_size, + struct page **compressed_pages) +{ + u64 isize = i_size_read(inode); + u64 actual_end = min(end + 1, isize); + u64 inline_len = actual_end - start; + u64 aligned_end = (end + root->sectorsize - 1) & + ~((u64)root->sectorsize - 1); + u64 hint_byte; + u64 data_len = inline_len; + int ret; + + if (compressed_size) + data_len = compressed_size; + + if (start > 0 || + data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) || + (!compressed_size && + (actual_end & (root->sectorsize - 1)) == 0) || + end + 1 < isize || + data_len > root->fs_info->max_inline) { + return 1; + } + + mutex_lock(&BTRFS_I(inode)->extent_mutex); + ret = btrfs_drop_extents(trans, root, inode, start, + aligned_end, aligned_end, &hint_byte); + BUG_ON(ret); + + if (isize > actual_end) + inline_len = min_t(u64, isize, actual_end); + ret = insert_inline_extent(trans, root, inode, start, + inline_len, compressed_size, + compressed_pages); + BUG_ON(ret); + btrfs_drop_extent_cache(inode, start, aligned_end, 0); + mutex_unlock(&BTRFS_I(inode)->extent_mutex); + return 0; +} + /* * when extent_io.c finds a delayed allocation range in the file, * the call backs end up in this code. The basic idea is to * allocate extents on disk for the range, and create ordered data structs * in ram to track those extents. + * + * locked_page is the page that writepage had locked already. We use + * it to make sure we don't do extra locks or unlocks. + * + * *page_started is set to one if we unlock locked_page and do everything + * required to start IO on it. It may be clean and already done with + * IO when we return. */ -static int cow_file_range(struct inode *inode, u64 start, u64 end) +static int cow_file_range(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; u64 alloc_hint = 0; u64 num_bytes; + unsigned long ram_size; + u64 orig_start; + u64 disk_num_bytes; u64 cur_alloc_size; u64 blocksize = root->sectorsize; - u64 orig_num_bytes; + u64 actual_end; struct btrfs_key ins; struct extent_map *em; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; + struct page **pages = NULL; + unsigned long nr_pages; + unsigned long nr_pages_ret = 0; + unsigned long total_compressed = 0; + unsigned long total_in = 0; + unsigned long max_compressed = 128 * 1024; + unsigned long max_uncompressed = 256 * 1024; + int i; + int will_compress; trans = btrfs_join_transaction(root, 1); BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); + orig_start = start; + + /* + * compression made this loop a bit ugly, but the basic idea is to + * compress some pages but keep the total size of the compressed + * extent relatively small. If compression is off, this goto target + * is never used. 
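Condensed into a standalone predicate, the eligibility test cow_file_range_inline performs above is easier to audit. A compilable sketch, where the two limits are invented stand-ins for BTRFS_MAX_INLINE_DATA_SIZE() and fs_info->max_inline:

#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE	4096ULL
#define MAX_LEAF_DATA	3900ULL	/* stand-in for BTRFS_MAX_INLINE_DATA_SIZE */
#define MAX_INLINE_OPT	8192ULL	/* stand-in for fs_info->max_inline */

/* returns 1 if the range may be stored inline, 0 otherwise */
static int inline_eligible(uint64_t start, uint64_t end, uint64_t isize,
			   uint64_t compressed_size)
{
	uint64_t actual_end = (end + 1 < isize) ? end + 1 : isize;
	uint64_t data_len = compressed_size ? compressed_size
					    : actual_end - start;

	if (start > 0 ||			/* must begin at offset 0 */
	    data_len >= MAX_LEAF_DATA ||	/* must fit in one leaf */
	    (!compressed_size &&
	     (actual_end & (SECTORSIZE - 1)) == 0) || /* whole blocks: skip */
	    end + 1 < isize ||			/* must reach the file tail */
	    data_len > MAX_INLINE_OPT)		/* mount-option cap */
		return 0;
	return 1;
}

int main(void)
{
	/* a 1000-byte file starting at offset 0: inline-able */
	printf("%d\n", inline_eligible(0, 999, 1000, 0));
	/* data in the middle of a file never goes inline */
	printf("%d\n", inline_eligible(8192, 9191, 20000, 0));
	return 0;
}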
+ */ +again: + will_compress = 0; + nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; + nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); + actual_end = min_t(u64, i_size_read(inode), end + 1); + total_compressed = actual_end - start; + + /* we want to make sure that amount of ram required to uncompress + * an extent is reasonable, so we limit the total size in ram + * of a compressed extent to 256k + */ + total_compressed = min(total_compressed, max_uncompressed); num_bytes = (end - start + blocksize) & ~(blocksize - 1); num_bytes = max(blocksize, num_bytes); - orig_num_bytes = num_bytes; + disk_num_bytes = num_bytes; + total_in = 0; + ret = 0; - if (alloc_hint == EXTENT_MAP_INLINE) - goto out; + /* we do compression for mount -o compress and when the + * inode has not been flagged as nocompress + */ + if (!btrfs_test_flag(inode, NOCOMPRESS) && + btrfs_test_opt(root, COMPRESS)) { + WARN_ON(pages); + pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + + /* we want to make sure the amount of IO required to satisfy + * a random read is reasonably small, so we limit the size + * of a compressed extent to 128k + */ + ret = btrfs_zlib_compress_pages(inode->i_mapping, start, + total_compressed, pages, + nr_pages, &nr_pages_ret, + &total_in, + &total_compressed, + max_compressed); + + if (!ret) { + unsigned long offset = total_compressed & + (PAGE_CACHE_SIZE - 1); + struct page *page = pages[nr_pages_ret - 1]; + char *kaddr; + + /* zero the tail end of the last page, we might be + * sending it down to disk + */ + if (offset) { + kaddr = kmap_atomic(page, KM_USER0); + memset(kaddr + offset, 0, + PAGE_CACHE_SIZE - offset); + kunmap_atomic(kaddr, KM_USER0); + } + will_compress = 1; + } + } + if (start == 0) { + /* lets try to make an inline extent */ + if (ret || total_in < (end - start + 1)) { + /* we didn't compress the entire range, try + * to make an uncompressed inline extent. 
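The compression-win test a few lines below rounds both byte counts up to their allocation granularity before comparing, because that is what actually occupies disk blocks and page cache. A compilable restatement (4k block and page size assumed):

#include <stdint.h>
#include <stdio.h>

#define BLOCKSIZE 4096ULL
#define PAGESIZE  4096ULL

static int compression_wins(uint64_t total_in, uint64_t total_compressed)
{
	/* round the compressed size to disk blocks, the plain size to pages */
	total_compressed = (total_compressed + BLOCKSIZE - 1) & ~(BLOCKSIZE - 1);
	total_in = (total_in + PAGESIZE - 1) & ~(PAGESIZE - 1);
	return total_compressed < total_in;
}

int main(void)
{
	/* 128k shrank to 60k: a clear win */
	printf("%d\n", compression_wins(128 * 1024, 60 * 1024));
	/* 8k "shrank" to 5k, but 5k still needs two 4k blocks: no win */
	printf("%d\n", compression_wins(8 * 1024, 5 * 1024));
	return 0;
}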
This + * is almost sure to fail, but maybe inline sizes + * will get bigger later + */ + ret = cow_file_range_inline(trans, root, inode, + start, end, 0, NULL); + } else { + ret = cow_file_range_inline(trans, root, inode, + start, end, + total_compressed, pages); + } + if (ret == 0) { + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + start, end, NULL, + 1, 1, 1); + *page_started = 1; + ret = 0; + goto free_pages_out; + } + } + + if (will_compress) { + /* + * we aren't doing an inline extent round the compressed size + * up to a block size boundary so the allocator does sane + * things + */ + total_compressed = (total_compressed + blocksize - 1) & + ~(blocksize - 1); + + /* + * one last check to make sure the compression is really a + * win, compare the page count read with the blocks on disk + */ + total_in = (total_in + PAGE_CACHE_SIZE - 1) & + ~(PAGE_CACHE_SIZE - 1); + if (total_compressed >= total_in) { + will_compress = 0; + } else { + disk_num_bytes = total_compressed; + num_bytes = total_in; + } + } + if (!will_compress && pages) { + /* + * the compression code ran but failed to make things smaller, + * free any pages it allocated and our page pointer array + */ + for (i = 0; i < nr_pages_ret; i++) { + page_cache_release(pages[i]); + } + kfree(pages); + pages = NULL; + total_compressed = 0; + nr_pages_ret = 0; + + /* flag the file so we don't compress in the future */ + btrfs_set_flag(inode, NOCOMPRESS); + } + + BUG_ON(disk_num_bytes > + btrfs_super_total_bytes(&root->fs_info->super_copy)); - BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); mutex_lock(&BTRFS_I(inode)->extent_mutex); btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); - while(num_bytes > 0) { - cur_alloc_size = min(num_bytes, root->fs_info->max_extent); + while(disk_num_bytes > 0) { + unsigned long min_bytes; + + /* + * the max size of a compressed extent is pretty small, + * make the code a little less complex by forcing + * the allocator to find a whole compressed extent at once + */ + if (will_compress) + min_bytes = disk_num_bytes; + else + min_bytes = root->sectorsize; + + cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); ret = btrfs_reserve_extent(trans, root, cur_alloc_size, - root->sectorsize, 0, alloc_hint, + min_bytes, 0, alloc_hint, (u64)-1, &ins, 1); if (ret) { WARN_ON(1); - goto out; + goto free_pages_out_fail; } em = alloc_extent_map(GFP_NOFS); em->start = start; - em->len = ins.offset; + + if (will_compress) { + ram_size = num_bytes; + em->len = num_bytes; + } else { + /* ramsize == disk size */ + ram_size = ins.offset; + em->len = ins.offset; + } + em->block_start = ins.objectid; + em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; + mutex_lock(&BTRFS_I(inode)->extent_mutex); set_bit(EXTENT_FLAG_PINNED, &em->flags); + + if (will_compress) + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + while(1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); @@ -174,26 +493,95 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) break; } btrfs_drop_extent_cache(inode, start, - start + ins.offset - 1, 0); + start + ram_size - 1, 0); } mutex_unlock(&BTRFS_I(inode)->extent_mutex); cur_alloc_size = ins.offset; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, - ins.offset, 0); + ram_size, cur_alloc_size, 0, + will_compress); BUG_ON(ret); - if (num_bytes < cur_alloc_size) { - printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes, + + if 
(disk_num_bytes < cur_alloc_size) { + printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes, cur_alloc_size); break; } + + if (will_compress) { + /* + * we're doing compression, we and we need to + * submit the compressed extents down to the device. + * + * We lock down all the file pages, clearing their + * dirty bits and setting them writeback. Everyone + * that wants to modify the page will wait on the + * ordered extent above. + * + * The writeback bits on the file pages are + * cleared when the compressed pages are on disk + */ + btrfs_end_transaction(trans, root); + + if (start <= page_offset(locked_page) && + page_offset(locked_page) < start + ram_size) { + *page_started = 1; + } + + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + start, + start + ram_size - 1, + NULL, 1, 1, 0); + + ret = btrfs_submit_compressed_write(inode, start, + ram_size, ins.objectid, + cur_alloc_size, pages, + nr_pages_ret); + + BUG_ON(ret); + trans = btrfs_join_transaction(root, 1); + if (start + ram_size < end) { + start += ram_size; + alloc_hint = ins.objectid + ins.offset; + /* pages will be freed at end_bio time */ + pages = NULL; + goto again; + } else { + /* we've written everything, time to go */ + break; + } + } + /* we're not doing compressed IO, don't unlock the first + * page (which the caller expects to stay locked), don't + * clear any dirty bits and don't set any writeback bits + */ + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, + start, start + ram_size - 1, + locked_page, 0, 0, 0); + disk_num_bytes -= cur_alloc_size; num_bytes -= cur_alloc_size; alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; } + + ret = 0; out: btrfs_end_transaction(trans, root); + return ret; + +free_pages_out_fail: + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, + start, end, locked_page, 0, 0, 0); +free_pages_out: + for (i = 0; i < nr_pages_ret; i++) + page_cache_release(pages[i]); + if (pages) + kfree(pages); + + goto out; } /* @@ -203,7 +591,8 @@ out: * If no cow copies or snapshots exist, we write directly to the existing * blocks on disk */ -static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end) +static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started) { u64 extent_start; u64 extent_end; @@ -260,6 +649,11 @@ again: extent_end = extent_start + extent_num_bytes; err = 0; + if (btrfs_file_extent_compression(leaf, item) || + btrfs_file_extent_encryption(leaf,item) || + btrfs_file_extent_other_encoding(leaf, item)) + goto not_found; + if (loops && start != extent_start) goto not_found; @@ -284,7 +678,8 @@ again: bytenr += btrfs_file_extent_offset(leaf, item); extent_num_bytes = min(end + 1, extent_end) - start; ret = btrfs_add_ordered_extent(inode, start, bytenr, - extent_num_bytes, 1); + extent_num_bytes, + extent_num_bytes, 1, 0); if (ret) { err = ret; goto out; @@ -300,7 +695,8 @@ again: not_found: btrfs_end_transaction(trans, root); btrfs_free_path(path); - return cow_file_range(inode, start, end); + return cow_file_range(inode, locked_page, start, end, + page_started); } out: WARN_ON(err); @@ -312,16 +708,19 @@ out: /* * extent_io.c call back to do delayed allocation processing */ -static int run_delalloc_range(struct inode *inode, u64 start, u64 end) +static int run_delalloc_range(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (btrfs_test_opt(root, NODATACOW) || 
btrfs_test_flag(inode, NODATACOW)) - ret = run_delalloc_nocow(inode, start, end); + ret = run_delalloc_nocow(inode, locked_page, start, end, + page_started); else - ret = cow_file_range(inode, start, end); + ret = cow_file_range(inode, locked_page, start, end, + page_started); return ret; } @@ -383,7 +782,8 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, * we don't create bios that span stripes or chunks */ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, - size_t size, struct bio *bio) + size_t size, struct bio *bio, + unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; struct btrfs_mapping_tree *map_tree; @@ -413,7 +813,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, * are inserted into the btree */ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -429,7 +829,7 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, * or reading the csums from the tree before a read */ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -444,11 +844,17 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, if (!(rw & (1 << BIO_RW))) { btrfs_lookup_bio_sums(root, inode, bio); + + if (bio_flags & EXTENT_BIO_COMPRESSED) { + return btrfs_submit_compressed_read(inode, bio, + mirror_num, bio_flags); + } + goto mapit; } return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, - __btrfs_submit_bio_hook); + bio_flags, __btrfs_submit_bio_hook); mapit: return btrfs_map_bio(root, rw, bio, mirror_num, 0); } @@ -539,7 +945,7 @@ out_page: * good idea. This causes problems because we want to make sure COW * properly happens and the data=ordered rules are followed. * - * In our case any range that doesn't have the EXTENT_ORDERED bit set + * In our case any range that doesn't have the ORDERED bit set * hasn't been properly setup for IO. We kick off an async process * to fix it up. The async helper will wait for ordered extents, set * the delalloc bit and make it safe to write the page. 
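btrfs_merge_bio_hook, reworked above to carry bio_flags, enforces a single rule: a bio must not grow across a stripe or chunk boundary, since the next stripe may sit on a different device. A toy compilable model of that decision (the 64k stripe length is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN (64 * 1024ULL)	/* assumed stripe width */

/* nonzero means the extra bytes would spill into the next stripe and
 * the page must start a fresh bio instead of being merged */
static int would_span_stripe(uint64_t logical, uint64_t bio_len,
			     uint64_t add_len)
{
	uint64_t stripe_end = (logical / STRIPE_LEN + 1) * STRIPE_LEN;

	return logical + bio_len + add_len > stripe_end;
}

int main(void)
{
	printf("%d\n", would_span_stripe(60 * 1024, 4096, 4096)); /* 1 */
	printf("%d\n", would_span_stripe(0, 4096, 4096));	  /* 0 */
	return 0;
}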
@@ -632,10 +1038,21 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) btrfs_set_file_extent_disk_bytenr(leaf, extent_item, ordered_extent->start); btrfs_set_file_extent_disk_num_bytes(leaf, extent_item, - ordered_extent->len); + ordered_extent->disk_len); btrfs_set_file_extent_offset(leaf, extent_item, 0); + + if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) + btrfs_set_file_extent_compression(leaf, extent_item, 1); + else + btrfs_set_file_extent_compression(leaf, extent_item, 0); + btrfs_set_file_extent_encryption(leaf, extent_item, 0); + btrfs_set_file_extent_other_encoding(leaf, extent_item, 0); + + /* ram bytes = extent_num_bytes for now */ btrfs_set_file_extent_num_bytes(leaf, extent_item, ordered_extent->len); + btrfs_set_file_extent_ram_bytes(leaf, extent_item, + ordered_extent->len); btrfs_mark_buffer_dirty(leaf); btrfs_drop_extent_cache(inode, ordered_extent->file_offset, @@ -644,7 +1061,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) mutex_unlock(&BTRFS_I(inode)->extent_mutex); ins.objectid = ordered_extent->start; - ins.offset = ordered_extent->len; + ins.offset = ordered_extent->disk_len; ins.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_alloc_reserved_extent(trans, root, leaf->start, root->root_key.objectid, @@ -714,6 +1131,7 @@ int btrfs_io_failed_hook(struct bio *failed_bio, int ret; int rw; u64 logical; + unsigned long bio_flags = 0; ret = get_state_private(failure_tree, start, &private); if (ret) { @@ -738,6 +1156,8 @@ int btrfs_io_failed_hook(struct bio *failed_bio, } logical = start - em->start; logical = em->block_start + logical; + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) + bio_flags = EXTENT_BIO_COMPRESSED; failrec->logical = logical; free_extent_map(em); set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | @@ -781,7 +1201,8 @@ int btrfs_io_failed_hook(struct bio *failed_bio, rw = READ; BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, - failrec->last_mirror); + failrec->last_mirror, + bio_flags); return 0; } @@ -1644,10 +2065,8 @@ search_again: item_end += btrfs_file_extent_num_bytes(leaf, fi); } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { - struct btrfs_item *item = btrfs_item_nr(leaf, - path->slots[0]); item_end += btrfs_file_extent_inline_len(leaf, - item); + fi); } item_end--; } @@ -1715,7 +2134,14 @@ search_again: root_owner = btrfs_header_owner(leaf); } } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { - if (!del_item) { + /* + * we can't truncate inline items that have had + * special encodings + */ + if (!del_item && + btrfs_file_extent_compression(leaf, fi) == 0 && + btrfs_file_extent_encryption(leaf, fi) == 0 && + btrfs_file_extent_other_encoding(leaf, fi) == 0) { u32 size = new_size - found_key.offset; if (root->ref_cows) { @@ -1926,7 +2352,8 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) err = btrfs_insert_file_extent(trans, root, inode->i_ino, hole_start, 0, 0, - hole_size, 0); + hole_size, 0, hole_size, + 0, 0, 0); btrfs_drop_extent_cache(inode, hole_start, (u64)-1, 0); btrfs_check_file(root, inode); @@ -2894,11 +3321,50 @@ static int merge_extent_mapping(struct extent_map_tree *em_tree, start_diff = map_start - em->start; em->start = map_start; em->len = map_len; - if (em->block_start < EXTENT_MAP_LAST_BYTE) + if (em->block_start < EXTENT_MAP_LAST_BYTE && + !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { em->block_start += start_diff; + em->block_len -= start_diff; + } return add_extent_mapping(em_tree, em); } +static 
noinline int uncompress_inline(struct btrfs_path *path, + struct inode *inode, struct page *page, + size_t pg_offset, u64 extent_offset, + struct btrfs_file_extent_item *item) +{ + int ret; + struct extent_buffer *leaf = path->nodes[0]; + char *tmp; + size_t max_size; + unsigned long inline_size; + unsigned long ptr; + + WARN_ON(pg_offset != 0); + max_size = btrfs_file_extent_ram_bytes(leaf, item); + inline_size = btrfs_file_extent_inline_item_len(leaf, + btrfs_item_nr(leaf, path->slots[0])); + tmp = kmalloc(inline_size, GFP_NOFS); + ptr = btrfs_file_extent_inline_start(item); + + read_extent_buffer(leaf, tmp, ptr, inline_size); + + max_size = min(PAGE_CACHE_SIZE, max_size); + ret = btrfs_zlib_decompress(tmp, page, extent_offset, + inline_size, max_size); + if (ret) { + char *kaddr = kmap_atomic(page, KM_USER0); + unsigned long copy_size = min_t(u64, + PAGE_CACHE_SIZE - pg_offset, + max_size - extent_offset); + memset(kaddr + pg_offset, 0, copy_size); + kunmap_atomic(kaddr, KM_USER0); + } + kfree(tmp); + return 0; +} + /* * a bit scary, this does extent mapping from logical file offset to the disk. * the ugly parts come from merging extents from the disk with the @@ -2927,6 +3393,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_trans_handle *trans = NULL; + int compressed; again: spin_lock(&em_tree->lock); @@ -2951,6 +3418,7 @@ again: em->bdev = root->fs_info->fs_devices->latest_bdev; em->start = EXTENT_MAP_HOLE; em->len = (u64)-1; + em->block_len = (u64)-1; if (!path) { path = btrfs_alloc_path(); @@ -2983,6 +3451,7 @@ again: found_type = btrfs_file_extent_type(leaf, item); extent_start = found_key.offset; + compressed = btrfs_file_extent_compression(leaf, item); if (found_type == BTRFS_FILE_EXTENT_REG) { extent_end = extent_start + btrfs_file_extent_num_bytes(leaf, item); @@ -3005,10 +3474,18 @@ again: em->block_start = EXTENT_MAP_HOLE; goto insert; } - bytenr += btrfs_file_extent_offset(leaf, item); - em->block_start = bytenr; em->start = extent_start; em->len = extent_end - extent_start; + if (compressed) { + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + em->block_start = bytenr; + em->block_len = btrfs_file_extent_disk_num_bytes(leaf, + item); + } else { + bytenr += btrfs_file_extent_offset(leaf, item); + em->block_start = bytenr; + em->block_len = em->len; + } goto insert; } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { u64 page_start; @@ -3018,8 +3495,7 @@ again: size_t extent_offset; size_t copy_size; - size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf, - path->slots[0])); + size = btrfs_file_extent_inline_len(leaf, item); extent_end = (extent_start + size + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); if (start < extent_start || start >= extent_end) { @@ -3035,9 +3511,10 @@ again: } em->block_start = EXTENT_MAP_INLINE; - if (!page) { + if (!page || create) { em->start = extent_start; - em->len = size; + em->len = (size + root->sectorsize - 1) & + ~((u64)root->sectorsize - 1); goto out; } @@ -3048,11 +3525,22 @@ again: em->start = extent_start + extent_offset; em->len = (copy_size + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); - map = kmap(page); + if (compressed) + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); ptr = btrfs_file_extent_inline_start(item) + extent_offset; if (create == 0 && !PageUptodate(page)) { - read_extent_buffer(leaf, map + pg_offset, ptr, - copy_size); + if 
(btrfs_file_extent_compression(leaf, item) == + BTRFS_COMPRESS_ZLIB) { + ret = uncompress_inline(path, inode, page, + pg_offset, + extent_offset, item); + BUG_ON(ret); + } else { + map = kmap(page); + read_extent_buffer(leaf, map + pg_offset, ptr, + copy_size); + kunmap(page); + } flush_dcache_page(page); } else if (create && PageUptodate(page)) { if (!trans) { @@ -3063,11 +3551,12 @@ again: trans = btrfs_join_transaction(root, 1); goto again; } + map = kmap(page); write_extent_buffer(leaf, map + pg_offset, ptr, copy_size); + kunmap(page); btrfs_mark_buffer_dirty(leaf); } - kunmap(page); set_extent_uptodate(io_tree, em->start, extent_map_end(em) - 1, GFP_NOFS); goto insert; @@ -3779,6 +4268,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, btrfs_set_file_extent_generation(leaf, ei, trans->transid); btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); + btrfs_set_file_extent_encryption(leaf, ei, 0); + btrfs_set_file_extent_compression(leaf, ei, 0); + btrfs_set_file_extent_other_encoding(leaf, ei, 0); + btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); + ptr = btrfs_file_extent_inline_start(ei); write_extent_buffer(leaf, symname, ptr, name_len); btrfs_mark_buffer_dirty(leaf); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 2eb6caba57c..b5745bb96d4 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -165,7 +165,8 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, * inserted. */ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, int nocow) + u64 start, u64 len, u64 disk_len, int nocow, + int compressed) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; @@ -180,9 +181,12 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->file_offset = file_offset; entry->start = start; entry->len = len; + entry->disk_len = disk_len; entry->inode = inode; if (nocow) set_bit(BTRFS_ORDERED_NOCOW, &entry->flags); + if (compressed) + set_bit(BTRFS_ORDERED_COMPRESSED, &entry->flags); /* one ref for the tree */ atomic_set(&entry->refs, 1); @@ -389,9 +393,10 @@ void btrfs_start_ordered_extent(struct inode *inode, * for pdflush to find them */ btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE); - if (wait) + if (wait) { wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); + } } /* diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index f50f8870a14..1ef464145d2 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -66,6 +66,8 @@ struct btrfs_ordered_sum { #define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */ +#define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */ + struct btrfs_ordered_extent { /* logical offset in the file */ u64 file_offset; @@ -73,9 +75,12 @@ struct btrfs_ordered_extent { /* disk byte number */ u64 start; - /* length of the extent in bytes */ + /* ram length of the extent in bytes */ u64 len; + /* extent length on disk */ + u64 disk_len; + /* flags (described above) */ unsigned long flags; @@ -127,7 +132,8 @@ int btrfs_remove_ordered_extent(struct inode *inode, int btrfs_dec_test_ordered_pending(struct inode *inode, u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, int nocow); + u64 start, u64 len, u64 disk_len, int nocow, + int compressed); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); 
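The ordered-data change above splits one length into two, and keeping the pairing straight matters everywhere the extent item is filled in: len is the byte count the file sees and feeds num_bytes/ram_bytes, disk_len is what was reserved on disk and feeds disk_num_bytes, and the two only diverge for compressed extents. A small compilable model (all values made up):

#include <stdint.h>
#include <stdio.h>

/* trimmed model of struct btrfs_ordered_extent after this patch */
struct ordered_model {
	uint64_t file_offset;	/* logical offset in the file */
	uint64_t start;		/* disk byte number */
	uint64_t len;		/* ram length */
	uint64_t disk_len;	/* length allocated on disk */
};

int main(void)
{
	/* 128k of file data compressed into a single 16k allocation */
	struct ordered_model o = {
		.file_offset = 0,
		.start = 12582912,
		.len = 128 * 1024,
		.disk_len = 16 * 1024,
	};

	/* as in btrfs_finish_ordered_io: num_bytes and ram_bytes come
	 * from o.len, disk_num_bytes and the reserved extent from
	 * o.disk_len */
	printf("ram %llu disk %llu\n",
	       (unsigned long long)o.len, (unsigned long long)o.disk_len);
	return 0;
}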
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index bd9ab3e9a7f..64725c13aa1 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -115,15 +115,16 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) if (btrfs_file_extent_type(l, fi) == BTRFS_FILE_EXTENT_INLINE) { printk("\t\tinline extent data size %u\n", - btrfs_file_extent_inline_len(l, item)); + btrfs_file_extent_inline_len(l, fi)); break; } printk("\t\textent data disk bytenr %llu nr %llu\n", (unsigned long long)btrfs_file_extent_disk_bytenr(l, fi), (unsigned long long)btrfs_file_extent_disk_num_bytes(l, fi)); - printk("\t\textent data offset %llu nr %llu\n", + printk("\t\textent data offset %llu nr %llu ram %llu\n", (unsigned long long)btrfs_file_extent_offset(l, fi), - (unsigned long long)btrfs_file_extent_num_bytes(l, fi)); + (unsigned long long)btrfs_file_extent_num_bytes(l, fi), + (unsigned long long)btrfs_file_extent_ram_bytes(l, fi)); break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2e6039825b7..431fdf144b5 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -47,6 +47,7 @@ #include "volumes.h" #include "version.h" #include "export.h" +#include "compression.h" #define BTRFS_SUPER_MAGIC 0x9123683E @@ -69,7 +70,7 @@ static void btrfs_put_super (struct super_block * sb) enum { Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, - Opt_ssd, Opt_thread_pool, Opt_noacl, Opt_err, + Opt_ssd, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_err, }; static match_table_t tokens = { @@ -83,6 +84,7 @@ static match_table_t tokens = { {Opt_max_inline, "max_inline=%s"}, {Opt_alloc_start, "alloc_start=%s"}, {Opt_thread_pool, "thread_pool=%d"}, + {Opt_compress, "compress"}, {Opt_ssd, "ssd"}, {Opt_noacl, "noacl"}, {Opt_err, NULL}, @@ -163,6 +165,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) btrfs_set_opt(info->mount_opt, NODATACOW); btrfs_set_opt(info->mount_opt, NODATASUM); break; + case Opt_compress: + printk(KERN_INFO "btrfs: use compression\n"); + btrfs_set_opt(info->mount_opt, COMPRESS); + break; case Opt_ssd: printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); btrfs_set_opt(info->mount_opt, SSD); @@ -622,6 +628,7 @@ static int __init init_btrfs_fs(void) err = btrfs_interface_init(); if (err) goto free_extent_map; + err = register_filesystem(&btrfs_fs_type); if (err) goto unregister_ioctl; @@ -651,6 +658,7 @@ static void __exit exit_btrfs_fs(void) unregister_filesystem(&btrfs_fs_type); btrfs_exit_sysfs(); btrfs_cleanup_fs_uuids(); + btrfs_zlib_exit(); } module_init(init_btrfs_fs) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cf618cc8b34..e6d579053a4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -540,8 +540,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, if (found_type == BTRFS_FILE_EXTENT_REG) extent_end = start + btrfs_file_extent_num_bytes(eb, item); else if (found_type == BTRFS_FILE_EXTENT_INLINE) { - size = btrfs_file_extent_inline_len(eb, - btrfs_item_nr(eb, slot)); + size = btrfs_file_extent_inline_len(eb, item); extent_end = (start + size + mask) & ~mask; } else { ret = 0; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2eed7f91f51..7db4cfd03a9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1816,6 +1816,7 @@ again: em->start = key.offset; em->len = *num_bytes; em->block_start = 0; + em->block_len = em->len; if (type & 
BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_add_system_chunk(trans, chunk_root, &key, @@ -2323,6 +2324,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, em->start = logical; em->len = length; em->block_start = 0; + em->block_len = em->len; map->num_stripes = num_stripes; map->io_width = btrfs_chunk_io_width(leaf, chunk); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c new file mode 100644 index 00000000000..e99309180a1 --- /dev/null +++ b/fs/btrfs/zlib.c @@ -0,0 +1,637 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + * Based on jffs2 zlib code: + * Copyright © 2001-2007 Red Hat, Inc. + * Created by David Woodhouse + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Plan: call deflate() with avail_in == *sourcelen, + avail_out = *dstlen - 12 and flush == Z_FINISH. + If it doesn't manage to finish, call it again with + avail_in == 0 and avail_out set to the remaining 12 + bytes for it to clean up. + Q: Is 12 bytes sufficient? +*/ +#define STREAM_END_SPACE 12 + +struct workspace { + z_stream inf_strm; + z_stream def_strm; + char *buf; + struct list_head list; +}; + +static LIST_HEAD(idle_workspace); +static DEFINE_SPINLOCK(workspace_lock); +static unsigned long num_workspace; +static atomic_t alloc_workspace = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(workspace_wait); + +/* + * this finds an available zlib workspace or allocates a new one + * NULL or an ERR_PTR is returned if things go bad. 
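+ * + * a workspace is pooled rather than allocated per call because the + * deflate and inflate state is large (both zlib workspaces below are + * vmalloc'ed), and the pool is capped near the number of online CPUs + * so heavy compression load waits instead of allocating without bound.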
+ */ +static struct workspace *find_zlib_workspace(void) +{ + struct workspace *workspace; + int ret; + int cpus = num_online_cpus(); + +again: + spin_lock(&workspace_lock); + if (!list_empty(&idle_workspace)) { + workspace = list_entry(idle_workspace.next, struct workspace, + list); + list_del(&workspace->list); + num_workspace--; + spin_unlock(&workspace_lock); + return workspace; + + } + spin_unlock(&workspace_lock); + if (atomic_read(&alloc_workspace) > cpus) { + DEFINE_WAIT(wait); + prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE); + if (atomic_read(&alloc_workspace) > cpus) + schedule(); + finish_wait(&workspace_wait, &wait); + goto again; + } + atomic_inc(&alloc_workspace); + workspace = kzalloc(sizeof(*workspace), GFP_NOFS); + if (!workspace) { + ret = -ENOMEM; + goto fail; + } + + workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); + if (!workspace->def_strm.workspace) { + ret = -ENOMEM; + goto fail; + } + workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); + if (!workspace->inf_strm.workspace) { + ret = -ENOMEM; + goto fail_inflate; + } + workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); + if (!workspace->buf) { + ret = -ENOMEM; + goto fail_kmalloc; + } + return workspace; + +fail_kmalloc: + vfree(workspace->inf_strm.workspace); +fail_inflate: + vfree(workspace->def_strm.workspace); +fail: + kfree(workspace); + atomic_dec(&alloc_workspace); + wake_up(&workspace_wait); + return ERR_PTR(ret); +} + +/* + * put a workspace struct back on the list or free it if we have enough + * idle ones sitting around + */ +static int free_workspace(struct workspace *workspace) +{ + spin_lock(&workspace_lock); + if (num_workspace < num_online_cpus()) { + list_add_tail(&workspace->list, &idle_workspace); + num_workspace++; + spin_unlock(&workspace_lock); + if (waitqueue_active(&workspace_wait)) + wake_up(&workspace_wait); + return 0; + } + spin_unlock(&workspace_lock); + vfree(workspace->def_strm.workspace); + vfree(workspace->inf_strm.workspace); + kfree(workspace->buf); + kfree(workspace); + + atomic_dec(&alloc_workspace); + if (waitqueue_active(&workspace_wait)) + wake_up(&workspace_wait); + return 0; +} + +/* + * cleanup function for module exit + */ +static void free_workspaces(void) +{ + struct workspace *workspace; + while(!list_empty(&idle_workspace)) { + workspace = list_entry(idle_workspace.next, struct workspace, + list); + list_del(&workspace->list); + vfree(workspace->def_strm.workspace); + vfree(workspace->inf_strm.workspace); + kfree(workspace->buf); + kfree(workspace); + atomic_dec(&alloc_workspace); + } +} + +/* + * given an address space and start/len, compress the bytes. + * + * pages are allocated to hold the compressed result and stored + * in 'pages' + * + * out_pages is used to return the number of pages allocated. There + * may be pages allocated even if we return an error + * + * total_in is used to return the number of bytes actually read. It + * may be smaller than len if we had to exit early because we + * ran out of room in the pages array or because we cross the + * max_out threshold. 
+ * + * total_out is used to return the total number of compressed bytes + * + * max_out tells us the max number of bytes that we're allowed to + * stuff into pages + */ +int btrfs_zlib_compress_pages(struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out) +{ + int ret; + struct workspace *workspace; + char *data_in; + char *cpage_out; + int nr_pages = 0; + struct page *in_page = NULL; + struct page *out_page = NULL; + int out_written = 0; + int in_read = 0; + unsigned long bytes_left; + + *out_pages = 0; + *total_out = 0; + *total_in = 0; + + workspace = find_zlib_workspace(); + if (!workspace) + return -1; + + if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { + printk(KERN_WARNING "deflateInit failed\n"); + ret = -1; + goto out; + } + + workspace->def_strm.total_in = 0; + workspace->def_strm.total_out = 0; + + in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + data_in = kmap(in_page); + + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + cpage_out = kmap(out_page); + pages[0] = out_page; + nr_pages = 1; + + workspace->def_strm.next_in = data_in; + workspace->def_strm.next_out = cpage_out; + workspace->def_strm.avail_out = PAGE_CACHE_SIZE; + workspace->def_strm.avail_in = min(len, PAGE_CACHE_SIZE); + + out_written = 0; + in_read = 0; + + while (workspace->def_strm.total_in < len) { + ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH); + if (ret != Z_OK) { + printk(KERN_DEBUG "btrfs deflate in loop returned %d\n", + ret); + zlib_deflateEnd(&workspace->def_strm); + ret = -1; + goto out; + } + + /* we're making it bigger, give up */ + if (workspace->def_strm.total_in > 8192 && + workspace->def_strm.total_in < + workspace->def_strm.total_out) { + ret = -1; + goto out; + } + /* we need another page for writing out. 
Test this + * before the total_in so we will pull in a new page for + * the stream end if required + */ + if (workspace->def_strm.avail_out == 0) { + kunmap(out_page); + if (nr_pages == nr_dest_pages) { + out_page = NULL; + ret = -1; + goto out; + } + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + cpage_out = kmap(out_page); + pages[nr_pages] = out_page; + nr_pages++; + workspace->def_strm.avail_out = PAGE_CACHE_SIZE; + workspace->def_strm.next_out = cpage_out; + } + /* we're all done */ + if (workspace->def_strm.total_in >= len) + break; + + /* we've read in a full page, get a new one */ + if (workspace->def_strm.avail_in == 0) { + if (workspace->def_strm.total_out > max_out) + break; + + bytes_left = len - workspace->def_strm.total_in; + kunmap(in_page); + page_cache_release(in_page); + + start += PAGE_CACHE_SIZE; + in_page = find_get_page(mapping, + start >> PAGE_CACHE_SHIFT); + data_in = kmap(in_page); + workspace->def_strm.avail_in = min(bytes_left, + PAGE_CACHE_SIZE); + workspace->def_strm.next_in = data_in; + } + } + workspace->def_strm.avail_in = 0; + ret = zlib_deflate(&workspace->def_strm, Z_FINISH); + zlib_deflateEnd(&workspace->def_strm); + + if (ret != Z_STREAM_END) { + ret = -1; + goto out; + } + + if (workspace->def_strm.total_out >= workspace->def_strm.total_in) { + ret = -1; + goto out; + } + + ret = 0; + *total_out = workspace->def_strm.total_out; + *total_in = workspace->def_strm.total_in; +out: + *out_pages = nr_pages; + if (out_page) + kunmap(out_page); + + if (in_page) { + kunmap(in_page); + page_cache_release(in_page); + } + free_workspace(workspace); + return ret; +} + +/* + * pages_in is an array of pages with compressed data. + * + * disk_start is the starting logical offset of this array in the file + * + * bvec is a bio_vec of pages from the file that we want to decompress into + * + * vcnt is the count of pages in the biovec + * + * srclen is the number of bytes in pages_in + * + * The basic idea is that we have a bio that was created by readpages. + * The pages in the bio are for the uncompressed data, and they may not + * be contiguous. They all correspond to the range of bytes covered by + * the compressed extent. + */ +int btrfs_zlib_decompress_biovec(struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen) +{ + int ret = 0; + int wbits = MAX_WBITS; + struct workspace *workspace; + char *data_in; + size_t total_out = 0; + unsigned long page_bytes_left; + unsigned long page_in_index = 0; + unsigned long page_out_index = 0; + struct page *page_out; + unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE; + unsigned long buf_start; + unsigned long buf_offset; + unsigned long bytes; + unsigned long working_bytes; + unsigned long pg_offset; + unsigned long start_byte; + unsigned long current_buf_start; + char *kaddr; + + workspace = find_zlib_workspace(); + if (!workspace) + return -ENOMEM; + + data_in = kmap(pages_in[page_in_index]); + workspace->inf_strm.next_in = data_in; + workspace->inf_strm.avail_in = min(srclen, PAGE_CACHE_SIZE); + workspace->inf_strm.total_in = 0; + + workspace->inf_strm.total_out = 0; + workspace->inf_strm.next_out = workspace->buf; + workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; + page_out = bvec[page_out_index].bv_page; + page_bytes_left = PAGE_CACHE_SIZE; + pg_offset = 0; + + /* If it's deflate, and it's got no preset dictionary, then + we can tell zlib to skip the adler32 check. 
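+ The two header bytes are the zlib CMF/FLG pair from RFC 1950: the low + nibble of the first byte must be 8 (Z_DEFLATED), the pair read as a + big-endian value must be divisible by 31, and the negative windowBits + passed to zlib_inflateInit2 below asks for a raw deflate stream with + no header or checksum.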
*/ + if (srclen > 2 && !(data_in[1] & PRESET_DICT) && + ((data_in[0] & 0x0f) == Z_DEFLATED) && + !(((data_in[0]<<8) + data_in[1]) % 31)) { + + wbits = -((data_in[0] >> 4) + 8); + workspace->inf_strm.next_in += 2; + workspace->inf_strm.avail_in -= 2; + } + + if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { + printk(KERN_WARNING "inflateInit failed\n"); + ret = -1; + goto out; + } + while(workspace->inf_strm.total_in < srclen) { + ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); + if (ret != Z_OK && ret != Z_STREAM_END) { + break; + } + + /* + * buf start is the byte offset we're of the start of + * our workspace buffer + */ + buf_start = total_out; + + /* total_out is the last byte of the workspace buffer */ + total_out = workspace->inf_strm.total_out; + + working_bytes = total_out - buf_start; + + /* + * start byte is the first byte of the page we're currently + * copying into relative to the start of the compressed data. + */ + start_byte = page_offset(page_out) - disk_start; + + if (working_bytes == 0) { + /* we didn't make progress in this inflate + * call, we're done + */ + if (ret != Z_STREAM_END) + ret = -1; + break; + } + + /* we haven't yet hit data corresponding to this page */ + if (total_out <= start_byte) { + goto next; + } + + /* + * the start of the data we care about is offset into + * the middle of our working buffer + */ + if (total_out > start_byte && buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes -= buf_offset; + } else { + buf_offset = 0; + } + current_buf_start = buf_start; + + /* copy bytes from the working buffer into the pages */ + while(working_bytes > 0) { + bytes = min(PAGE_CACHE_SIZE - pg_offset, + PAGE_CACHE_SIZE - buf_offset); + bytes = min(bytes, working_bytes); + kaddr = kmap_atomic(page_out, KM_USER0); + memcpy(kaddr + pg_offset, workspace->buf + buf_offset, + bytes); + kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(page_out); + + pg_offset += bytes; + page_bytes_left -= bytes; + buf_offset += bytes; + working_bytes -= bytes; + current_buf_start += bytes; + + /* check if we need to pick another page */ + if (page_bytes_left == 0) { + page_out_index++; + if (page_out_index >= vcnt) { + ret = 0; + goto done; + } + page_out = bvec[page_out_index].bv_page; + pg_offset = 0; + page_bytes_left = PAGE_CACHE_SIZE; + start_byte = page_offset(page_out) - disk_start; + + /* + * make sure our new page is covered by this + * working buffer + */ + if (total_out <= start_byte) { + goto next; + } + + /* the next page in the biovec might not + * be adjacent to the last page, but it + * might still be found inside this working + * buffer. 
bump our offset pointer + */ + if (total_out > start_byte && + current_buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes = total_out - start_byte; + current_buf_start = buf_start + + buf_offset; + } + } + } +next: + workspace->inf_strm.next_out = workspace->buf; + workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; + + if (workspace->inf_strm.avail_in == 0) { + unsigned long tmp; + kunmap(pages_in[page_in_index]); + page_in_index++; + if (page_in_index >= total_pages_in) { + data_in = NULL; + break; + } + data_in = kmap(pages_in[page_in_index]); + workspace->inf_strm.next_in = data_in; + tmp = srclen - workspace->inf_strm.total_in; + workspace->inf_strm.avail_in = min(tmp, + PAGE_CACHE_SIZE); + } + } + if (ret != Z_STREAM_END) { + ret = -1; + } else { + ret = 0; + } +done: + zlib_inflateEnd(&workspace->inf_strm); + if (data_in) + kunmap(pages_in[page_in_index]); +out: + free_workspace(workspace); + return ret; +} + +/* + * a less complex decompression routine. Our compressed data fits in a + * single page, and we want to read a single page out of it. + * start_byte tells us the offset into the compressed data we're interested in + */ +int btrfs_zlib_decompress(unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen) +{ + int ret = 0; + int wbits = MAX_WBITS; + struct workspace *workspace; + unsigned long bytes_left = destlen; + unsigned long total_out = 0; + char *kaddr; + + if (destlen > PAGE_CACHE_SIZE) + return -ENOMEM; + + workspace = find_zlib_workspace(); + if (!workspace) + return -ENOMEM; + + workspace->inf_strm.next_in = data_in; + workspace->inf_strm.avail_in = srclen; + workspace->inf_strm.total_in = 0; + + workspace->inf_strm.next_out = workspace->buf; + workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; + workspace->inf_strm.total_out = 0; + /* If it's deflate, and it's got no preset dictionary, then + we can tell zlib to skip the adler32 check. 
*/ + if (srclen > 2 && !(data_in[1] & PRESET_DICT) && + ((data_in[0] & 0x0f) == Z_DEFLATED) && + !(((data_in[0]<<8) + data_in[1]) % 31)) { + + wbits = -((data_in[0] >> 4) + 8); + workspace->inf_strm.next_in += 2; + workspace->inf_strm.avail_in -= 2; + } + + if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { + printk(KERN_WARNING "inflateInit failed\n"); + ret = -1; + goto out; + } + + while(bytes_left > 0) { + unsigned long buf_start; + unsigned long buf_offset; + unsigned long bytes; + unsigned long pg_offset = 0; + + ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); + if (ret != Z_OK && ret != Z_STREAM_END) { + break; + } + + buf_start = total_out; + total_out = workspace->inf_strm.total_out; + + if (total_out == buf_start) { + ret = -1; + break; + } + + if (total_out <= start_byte) { + goto next; + } + + if (total_out > start_byte && buf_start < start_byte) { + buf_offset = start_byte - buf_start; + } else { + buf_offset = 0; + } + + bytes = min(PAGE_CACHE_SIZE - pg_offset, + PAGE_CACHE_SIZE - buf_offset); + bytes = min(bytes, bytes_left); + + kaddr = kmap_atomic(dest_page, KM_USER0); + memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes); + kunmap_atomic(kaddr, KM_USER0); + + pg_offset += bytes; + bytes_left -= bytes; +next: + workspace->inf_strm.next_out = workspace->buf; + workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; + } + if (ret != Z_STREAM_END && bytes_left != 0) { + ret = -1; + } else { + ret = 0; + } + zlib_inflateEnd(&workspace->inf_strm); +out: + free_workspace(workspace); + return ret; +} + +void btrfs_zlib_exit(void) +{ + free_workspaces(); +} -- cgit v1.2.3-70-g09d2 From 80ff385665b7fca29fefe358a60ab0d09f9b8e87 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Thu, 30 Oct 2008 14:20:02 -0400 Subject: Btrfs: update nodatacow code v2 This patch simplifies the nodatacow checker. If all references were created after the latest snapshot, then we can avoid COW safely. This patch also updates run_delalloc_nocow to do more fine-grained checking. 
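The heart of the new check can be modeled in a few lines of user-space C. This is only a sketch: the real btrfs_cross_ref_exist walks extent ref items under the extent tree and also tolerates references from the tree log, which the toy version below leaves out.

#include <stdint.h>
#include <stdio.h>

struct ref {
	uint64_t root;		/* objectid of the tree holding the reference */
	uint64_t generation;	/* transaction that created the reference */
};

/* returns 1 if the extent may be shared with a snapshot and must be
 * COWed, 0 if it is safe to overwrite in place: any reference from a
 * foreign root, or any reference created at or before the last
 * snapshot, forces COW */
static int cross_ref_exist(const struct ref *refs, int nr,
			   uint64_t my_root, uint64_t last_snapshot)
{
	for (int i = 0; i < nr; i++) {
		if (refs[i].root != my_root)
			return 1;
		if (refs[i].generation <= last_snapshot)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct ref refs[] = { { .root = 5, .generation = 10 } };

	/* last snapshot was transaction 8: the only reference is newer,
	 * so no snapshot can share the extent and nocow is safe */
	printf("must cow: %d\n", cross_ref_exist(refs, 1, 5, 8));

	/* last snapshot was transaction 12: the reference predates it,
	 * the snapshot may share the extent, so we must COW */
	printf("must cow: %d\n", cross_ref_exist(refs, 1, 5, 12));
	return 0;
}
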
Signed-off-by: Yan Zheng --- fs/btrfs/ctree.h | 8 +- fs/btrfs/extent-tree.c | 131 ++++------------------------- fs/btrfs/inode.c | 213 ++++++++++++++++++++++++++++-------------------- fs/btrfs/ioctl.c | 1 + fs/btrfs/ordered-data.c | 9 +- fs/btrfs/ordered-data.h | 3 +- fs/btrfs/transaction.c | 2 + 7 files changed, 154 insertions(+), 213 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ca5547af609..8bf6a085a73 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -454,6 +454,7 @@ struct btrfs_root_item { __le64 bytenr; __le64 byte_limit; __le64 bytes_used; + __le64 last_snapshot; __le32 flags; __le32 refs; struct btrfs_disk_key drop_progress; @@ -1413,6 +1414,8 @@ BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 32); BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64); BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); +BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, + last_snapshot, 64); /* struct btrfs_super_block */ BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); @@ -1564,9 +1567,8 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, u64 bytenr, u64 num, int pin); int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *leaf); -int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_key *key, u64 bytenr); +int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr); int btrfs_extent_post_op(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 535cee47fcf..1eb69a91b72 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -848,9 +848,8 @@ out: return 0; } -static int get_reference_status(struct btrfs_root *root, u64 bytenr, - u64 parent_gen, u64 ref_objectid, - u64 *min_generation, u32 *ref_count) +int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr) { struct btrfs_root *extent_root = root->fs_info->extent_root; struct btrfs_path *path; @@ -858,8 +857,8 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, struct btrfs_extent_ref *ref_item; struct btrfs_key key; struct btrfs_key found_key; - u64 root_objectid = root->root_key.objectid; - u64 ref_generation; + u64 ref_root; + u64 last_snapshot; u32 nritems; int ret; @@ -872,7 +871,9 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, if (ret < 0) goto out; BUG_ON(ret == 0); - if (ret < 0 || path->slots[0] == 0) + + ret = -ENOENT; + if (path->slots[0] == 0) goto out; path->slots[0]--; @@ -880,14 +881,10 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != bytenr || - found_key.type != BTRFS_EXTENT_ITEM_KEY) { - ret = 1; + found_key.type != BTRFS_EXTENT_ITEM_KEY) goto out; - } - - *ref_count = 0; - *min_generation = (u64)-1; + last_snapshot = btrfs_root_last_snapshot(&root->root_item); while (1) { leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); @@ -910,114 +907,22 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, ref_item = btrfs_item_ptr(leaf, path->slots[0], struct 
btrfs_extent_ref); - ref_generation = btrfs_ref_generation(leaf, ref_item); - /* - * For (parent_gen > 0 && parent_gen > ref_generation): - * - * we reach here through the oldest root, therefore - * all other reference from same snapshot should have - * a larger generation. - */ - if ((root_objectid != btrfs_ref_root(leaf, ref_item)) || - (parent_gen > 0 && parent_gen > ref_generation) || - (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID && - ref_objectid != btrfs_ref_objectid(leaf, ref_item))) { - *ref_count = 2; - break; - } - - *ref_count = 1; - if (*min_generation > ref_generation) - *min_generation = ref_generation; - - path->slots[0]++; - } - ret = 0; -out: - btrfs_free_path(path); - return ret; -} - -int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_key *key, u64 bytenr) -{ - struct btrfs_root *old_root; - struct btrfs_path *path = NULL; - struct extent_buffer *eb; - struct btrfs_file_extent_item *item; - u64 ref_generation; - u64 min_generation; - u64 extent_start; - u32 ref_count; - int level; - int ret; - - BUG_ON(trans == NULL); - BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY); - ret = get_reference_status(root, bytenr, 0, key->objectid, - &min_generation, &ref_count); - if (ret) - return ret; - - if (ref_count != 1) - return 1; - - old_root = root->dirty_root->root; - ref_generation = old_root->root_key.offset; - - /* all references are created in running transaction */ - if (min_generation > ref_generation) { - ret = 0; - goto out; - } - - path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } - - path->skip_locking = 1; - /* if no item found, the extent is referenced by other snapshot */ - ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0); - if (ret) - goto out; - - eb = path->nodes[0]; - item = btrfs_item_ptr(eb, path->slots[0], - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG || - btrfs_file_extent_disk_bytenr(eb, item) != bytenr) { - ret = 1; - goto out; - } - - for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) { - if (level >= 0) { - eb = path->nodes[level]; - if (!eb) - continue; - extent_start = eb->start; - } else - extent_start = bytenr; - - ret = get_reference_status(root, extent_start, ref_generation, - 0, &min_generation, &ref_count); - if (ret) + ref_root = btrfs_ref_root(leaf, ref_item); + if (ref_root != root->root_key.objectid && + ref_root != BTRFS_TREE_LOG_OBJECTID) { + ret = 1; goto out; - - if (ref_count != 1) { + } + if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) { ret = 1; goto out; } - if (level >= 0) - ref_generation = btrfs_header_generation(eb); + + path->slots[0]++; } ret = 0; out: - if (path) - btrfs_free_path(path); + btrfs_free_path(path); return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e8511d14b11..3e6f0568fdb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -298,6 +298,7 @@ static int cow_file_range(struct inode *inode, struct page *locked_page, unsigned long max_compressed = 128 * 1024; unsigned long max_uncompressed = 256 * 1024; int i; + int ordered_type; int will_compress; trans = btrfs_join_transaction(root, 1); @@ -491,9 +492,10 @@ again: } cur_alloc_size = ins.offset; + ordered_type = will_compress ? 
BTRFS_ORDERED_COMPRESSED : 0; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, - ram_size, cur_alloc_size, 0, - will_compress); + ram_size, cur_alloc_size, + ordered_type); BUG_ON(ret); if (disk_num_bytes < cur_alloc_size) { @@ -587,115 +589,148 @@ free_pages_out: static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started) { - u64 extent_start; - u64 extent_end; - u64 bytenr; - u64 loops = 0; - u64 total_fs_bytes; struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_block_group_cache *block_group; struct btrfs_trans_handle *trans; struct extent_buffer *leaf; - int found_type; struct btrfs_path *path; - struct btrfs_file_extent_item *item; - int ret; - int err = 0; + struct btrfs_file_extent_item *fi; struct btrfs_key found_key; + u64 cow_start; + u64 cur_offset; + u64 extent_end; + u64 disk_bytenr; + u64 num_bytes; + int extent_type; + int ret; + int nocow; + int check_prev = 1; - total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); path = btrfs_alloc_path(); BUG_ON(!path); trans = btrfs_join_transaction(root, 1); BUG_ON(!trans); -again: - ret = btrfs_lookup_file_extent(NULL, root, path, - inode->i_ino, start, 0); - if (ret < 0) { - err = ret; - goto out; - } - - if (ret != 0) { - if (path->slots[0] == 0) - goto not_found; - path->slots[0]--; - } - - leaf = path->nodes[0]; - item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - - /* are we inside the extent that was found? */ - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - found_type = btrfs_key_type(&found_key); - if (found_key.objectid != inode->i_ino || - found_type != BTRFS_EXTENT_DATA_KEY) - goto not_found; - - found_type = btrfs_file_extent_type(leaf, item); - extent_start = found_key.offset; - if (found_type == BTRFS_FILE_EXTENT_REG) { - u64 extent_num_bytes; - - extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item); - extent_end = extent_start + extent_num_bytes; - err = 0; - if (btrfs_file_extent_compression(leaf, item) || - btrfs_file_extent_encryption(leaf,item) || - btrfs_file_extent_other_encoding(leaf, item)) - goto not_found; + cow_start = (u64)-1; + cur_offset = start; + while (1) { + ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + cur_offset, 0); + BUG_ON(ret < 0); + if (ret > 0 && path->slots[0] > 0 && check_prev) { + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &found_key, + path->slots[0] - 1); + if (found_key.objectid == inode->i_ino && + found_key.type == BTRFS_EXTENT_DATA_KEY) + path->slots[0]--; + } + check_prev = 0; +next_slot: + leaf = path->nodes[0]; + if (path->slots[0] >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + BUG_ON(1); + if (ret > 0) + break; + leaf = path->nodes[0]; + } - if (loops && start != extent_start) - goto not_found; + nocow = 0; + disk_bytenr = 0; + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (start < extent_start || start >= extent_end) - goto not_found; + if (found_key.objectid > inode->i_ino || + found_key.type > BTRFS_EXTENT_DATA_KEY || + found_key.offset > end) + break; - bytenr = btrfs_file_extent_disk_bytenr(leaf, item); - if (bytenr == 0) - goto not_found; + if (found_key.offset > cur_offset) { + extent_end = found_key.offset; + goto out_check; + } - if (btrfs_cross_ref_exists(trans, root, &found_key, bytenr)) - goto not_found; - /* - * we may be called by the resizer, make sure we're inside - * the limits of the FS - */ - block_group = 
btrfs_lookup_block_group(root->fs_info, - bytenr); - if (!block_group || block_group->ro) - goto not_found; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + extent_type = btrfs_file_extent_type(leaf, fi); - bytenr += btrfs_file_extent_offset(leaf, item); - extent_num_bytes = min(end + 1, extent_end) - start; - ret = btrfs_add_ordered_extent(inode, start, bytenr, - extent_num_bytes, - extent_num_bytes, 1, 0); - if (ret) { - err = ret; - goto out; + if (extent_type == BTRFS_FILE_EXTENT_REG) { + struct btrfs_block_group_cache *block_group; + disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); + extent_end = found_key.offset + + btrfs_file_extent_num_bytes(leaf, fi); + if (extent_end <= start) { + path->slots[0]++; + goto next_slot; + } + if (btrfs_file_extent_compression(leaf, fi) || + btrfs_file_extent_encryption(leaf, fi) || + btrfs_file_extent_other_encoding(leaf, fi)) + goto out_check; + if (disk_bytenr == 0) + goto out_check; + if (btrfs_cross_ref_exist(trans, root, disk_bytenr)) + goto out_check; + block_group = btrfs_lookup_block_group(root->fs_info, + disk_bytenr); + if (!block_group || block_group->ro) + goto out_check; + disk_bytenr += btrfs_file_extent_offset(leaf, fi); + nocow = 1; + } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { + extent_end = found_key.offset + + btrfs_file_extent_inline_len(leaf, fi); + extent_end = ALIGN(extent_end, root->sectorsize); + } else { + BUG_ON(1); + } +out_check: + if (extent_end <= start) { + path->slots[0]++; + goto next_slot; + } + if (!nocow) { + if (cow_start == (u64)-1) + cow_start = cur_offset; + cur_offset = extent_end; + if (cur_offset > end) + break; + path->slots[0]++; + goto next_slot; } btrfs_release_path(root, path); - start = extent_end; - if (start <= end) { - loops++; - goto again; + if (cow_start != (u64)-1) { + ret = cow_file_range(inode, locked_page, cow_start, + found_key.offset - 1, page_started); + BUG_ON(ret); + cow_start = (u64)-1; } - } else { -not_found: - btrfs_end_transaction(trans, root); - btrfs_free_path(path); - return cow_file_range(inode, locked_page, start, end, - page_started); + + disk_bytenr += cur_offset - found_key.offset; + num_bytes = min(end + 1, extent_end) - cur_offset; + + ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, + num_bytes, num_bytes, + BTRFS_ORDERED_NOCOW); + cur_offset = extent_end; + if (cur_offset > end) + break; } -out: - WARN_ON(err); - btrfs_end_transaction(trans, root); + btrfs_release_path(root, path); + + if (cur_offset <= end && cow_start == (u64)-1) + cow_start = cur_offset; + if (cow_start != (u64)-1) { + ret = cow_file_range(inode, locked_page, cow_start, end, + page_started); + BUG_ON(ret); + } + + ret = btrfs_end_transaction(trans, root); + BUG_ON(ret); btrfs_free_path(path); - return err; + return 0; } /* diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index fd3c8b5676c..7f915d47839 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -112,6 +112,7 @@ static noinline int create_subvol(struct btrfs_root *root, btrfs_set_root_level(&root_item, 0); btrfs_set_root_refs(&root_item, 1); btrfs_set_root_used(&root_item, 0); + btrfs_set_root_last_snapshot(&root_item, 0); memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress)); root_item.drop_level = 0; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index b5745bb96d4..e7317c8fda2 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -165,8 +165,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, * 
inserted. */ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int nocow, - int compressed) + u64 start, u64 len, u64 disk_len, int type) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; @@ -183,10 +182,8 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->len = len; entry->disk_len = disk_len; entry->inode = inode; - if (nocow) - set_bit(BTRFS_ORDERED_NOCOW, &entry->flags); - if (compressed) - set_bit(BTRFS_ORDERED_COMPRESSED, &entry->flags); + if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_COMPRESSED) + set_bit(type, &entry->flags); /* one ref for the tree */ atomic_set(&entry->refs, 1); diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 1ef464145d2..e6d9bc54c2b 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -132,8 +132,7 @@ int btrfs_remove_ordered_extent(struct inode *inode, int btrfs_dec_test_ordered_pending(struct inode *inode, u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int nocow, - int compressed); + u64 start, u64 len, u64 disk_len, int type); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 968b84f17a1..e72a013d24b 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -763,6 +763,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, if (ret) goto fail; + btrfs_record_root_in_trans(root); + btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); key.objectid = objectid; -- cgit v1.2.3-70-g09d2 From d899e05215178fed903ad0e7fc1cb4d8e0cc0a88 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Thu, 30 Oct 2008 14:25:28 -0400 Subject: Btrfs: Add fallocate support v2 This patch adds fallocate support to Btrfs. fallocate is a little different in Btrfs because we need to tell the COW system that a given preallocated extent doesn't need to be cow'd as long as there are no snapshots of it. This leverages the -o nodatacow checks. 
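Conceptually, the conversion done at write time works like the sketch below. It models only the splitting that btrfs_mark_extent_written performs when part of a preallocated extent is written; the kernel version also merges with mergeable neighbours and maintains extent references, which a toy model can skip.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* file extent types after this patch: INLINE and REG are renumbered
 * and PREALLOC is new */
enum { EXTENT_INLINE = 0, EXTENT_REG = 1, EXTENT_PREALLOC = 2 };

struct file_extent {
	uint64_t offset;	/* logical start in the file */
	uint64_t len;
	int type;
};

/* model of marking [start, end) written inside one preallocated extent:
 * the written middle becomes REG while any unwritten head or tail stays
 * PREALLOC, so one extent splits into up to three pieces; returns the
 * number of pieces stored in out[] */
static int mark_extent_written(const struct file_extent *e,
			       uint64_t start, uint64_t end,
			       struct file_extent out[3])
{
	int n = 0;

	assert(e->type == EXTENT_PREALLOC);
	assert(e->offset <= start && end <= e->offset + e->len);

	if (e->offset < start)
		out[n++] = (struct file_extent){ e->offset,
						 start - e->offset,
						 EXTENT_PREALLOC };
	out[n++] = (struct file_extent){ start, end - start, EXTENT_REG };
	if (end < e->offset + e->len)
		out[n++] = (struct file_extent){ end,
						 e->offset + e->len - end,
						 EXTENT_PREALLOC };
	return n;
}

int main(void)
{
	struct file_extent e = { 0, 1 << 20, EXTENT_PREALLOC };
	struct file_extent out[3];
	int n = mark_extent_written(&e, 256 * 1024, 512 * 1024, out);

	for (int i = 0; i < n; i++)
		printf("extent at %llu len %llu type %d\n",
		       (unsigned long long)out[i].offset,
		       (unsigned long long)out[i].len, out[i].type);
	return 0;
}
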
Signed-off-by: Yan Zheng --- fs/btrfs/ctree.h | 9 +- fs/btrfs/extent-tree.c | 65 +++++----- fs/btrfs/extent_io.c | 12 +- fs/btrfs/extent_map.h | 1 + fs/btrfs/file.c | 245 +++++++++++++++++++++++++++++++++++- fs/btrfs/inode.c | 323 +++++++++++++++++++++++++++++++++++++----------- fs/btrfs/ioctl.c | 3 +- fs/btrfs/ordered-data.c | 5 +- fs/btrfs/ordered-data.h | 4 +- fs/btrfs/tree-log.c | 13 +- 10 files changed, 563 insertions(+), 117 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8bf6a085a73..d5ba3d1aaf9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -462,8 +462,9 @@ struct btrfs_root_item { u8 level; } __attribute__ ((__packed__)); -#define BTRFS_FILE_EXTENT_REG 0 -#define BTRFS_FILE_EXTENT_INLINE 1 +#define BTRFS_FILE_EXTENT_INLINE 0 +#define BTRFS_FILE_EXTENT_REG 1 +#define BTRFS_FILE_EXTENT_PREALLOC 2 struct btrfs_file_extent_item { /* @@ -868,6 +869,7 @@ struct btrfs_root { #define BTRFS_INODE_NODATACOW (1 << 1) #define BTRFS_INODE_READONLY (1 << 2) #define BTRFS_INODE_NOCOMPRESS (1 << 3) +#define BTRFS_INODE_PREALLOC (1 << 4) #define btrfs_clear_flag(inode, flag) (BTRFS_I(inode)->flags &= \ ~BTRFS_INODE_##flag) #define btrfs_set_flag(inode, flag) (BTRFS_I(inode)->flags |= \ @@ -1924,6 +1926,9 @@ extern struct file_operations btrfs_file_operations; int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, u64 inline_limit, u64 *hint_block); +int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); /* tree-defrag.c */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1eb69a91b72..8af39521eb7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2147,6 +2147,9 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, total_needed += empty_size; block_group = btrfs_lookup_block_group(root->fs_info, search_start); + if (!block_group) + block_group = btrfs_lookup_first_block_group(root->fs_info, + search_start); space_info = __find_space_info(root->fs_info, data); down_read(&space_info->groups_sem); @@ -3426,9 +3429,7 @@ walk_down: next: level--; btrfs_release_path(extent_root, path); - if (need_resched()) { - cond_resched(); - } + cond_resched(); } /* reached lowest level */ ret = 1; @@ -3539,9 +3540,7 @@ found: } btrfs_release_path(extent_root, path); - if (need_resched()) { - cond_resched(); - } + cond_resched(); } /* reached max tree level, but no tree root found. 
*/ BUG(); @@ -3654,8 +3653,9 @@ static int noinline get_new_locations(struct inode *reloc_inode, exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, fi); - WARN_ON(exts[nr].offset > 0); - WARN_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); + BUG_ON(exts[nr].offset > 0); + BUG_ON(exts[nr].compression || exts[nr].encryption); + BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); cur_pos += exts[nr].num_bytes; nr++; @@ -3709,6 +3709,7 @@ static int noinline replace_one_extent(struct btrfs_trans_handle *trans, u32 nritems; int nr_scaned = 0; int extent_locked = 0; + int extent_type; int ret; memcpy(&key, leaf_key, sizeof(key)); @@ -3781,8 +3782,9 @@ next: } fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); - if ((btrfs_file_extent_type(leaf, fi) != - BTRFS_FILE_EXTENT_REG) || + extent_type = btrfs_file_extent_type(leaf, fi); + if ((extent_type != BTRFS_FILE_EXTENT_REG && + extent_type != BTRFS_FILE_EXTENT_PREALLOC) || (btrfs_file_extent_disk_bytenr(leaf, fi) != extent_key->objectid)) { path->slots[0]++; @@ -3865,16 +3867,10 @@ next: if (nr_extents == 1) { /* update extent pointer in place */ - btrfs_set_file_extent_generation(leaf, fi, - trans->transid); btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extents[0].disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extents[0].disk_num_bytes); - btrfs_set_file_extent_ram_bytes(leaf, fi, - new_extents[0].ram_bytes); - ext_offset += new_extents[0].offset; - btrfs_set_file_extent_offset(leaf, fi, ext_offset); btrfs_mark_buffer_dirty(leaf); btrfs_drop_extent_cache(inode, key.offset, @@ -3901,6 +3897,8 @@ next: btrfs_release_path(root, path); key.offset += num_bytes; } else { + BUG_ON(1); +#if 0 u64 alloc_hint; u64 extent_len; int i; @@ -3977,6 +3975,7 @@ next: break; } BUG_ON(i >= nr_extents); +#endif } if (extent_locked) { @@ -4156,15 +4155,10 @@ static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans, ref->extents[ext_index].bytenr = new_extent->disk_bytenr; ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; - btrfs_set_file_extent_generation(leaf, fi, trans->transid); - btrfs_set_file_extent_ram_bytes(leaf, fi, - new_extent->ram_bytes); btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extent->disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extent->disk_num_bytes); - new_extent->offset += btrfs_file_extent_offset(leaf, fi); - btrfs_set_file_extent_offset(leaf, fi, new_extent->offset); btrfs_mark_buffer_dirty(leaf); ret = btrfs_inc_extent_ref(trans, root, @@ -4625,12 +4619,15 @@ static int noinline relocate_one_extent(struct btrfs_root *extent_root, */ if (!new_extents) { u64 group_start = group->key.objectid; + new_extents = kmalloc(sizeof(*new_extents), + GFP_NOFS); + nr_extents = 1; ret = get_new_locations(reloc_inode, extent_key, - group_start, 0, + group_start, 1, &new_extents, &nr_extents); - if (ret < 0) + if (ret) goto out; } btrfs_record_root_in_trans(found_root); @@ -4762,7 +4759,8 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans, btrfs_set_inode_generation(leaf, item, 1); btrfs_set_inode_size(leaf, item, size); btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); - btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM); + btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM | + BTRFS_INODE_NOCOMPRESS); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(root, path); out: @@ -4835,6 +4833,7 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 
group_start) struct inode *reloc_inode; struct btrfs_block_group_cache *block_group; struct btrfs_key key; + u64 skipped; u64 cur_byte; u64 total_found; u32 nritems; @@ -4864,6 +4863,7 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start) btrfs_start_delalloc_inodes(info->tree_root); btrfs_wait_ordered_extents(info->tree_root, 0); again: + skipped = 0; total_found = 0; progress = 0; key.objectid = block_group->key.objectid; @@ -4926,6 +4926,8 @@ next: ret = relocate_one_extent(root, path, &key, block_group, reloc_inode, pass); BUG_ON(ret < 0); + if (ret > 0) + skipped++; key.objectid = cur_byte; key.type = 0; @@ -4944,6 +4946,11 @@ next: printk("btrfs found %llu extents in pass %d\n", (unsigned long long)total_found, pass); pass++; + if (total_found == skipped && pass > 2) { + iput(reloc_inode); + reloc_inode = create_reloc_inode(info, block_group); + pass = 0; + } goto again; } @@ -5011,17 +5018,17 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { block_group = rb_entry(n, struct btrfs_block_group_cache, cache_node); - - spin_unlock(&info->block_group_cache_lock); - btrfs_remove_free_space_cache(block_group); - spin_lock(&info->block_group_cache_lock); - rb_erase(&block_group->cache_node, &info->block_group_cache_tree); + spin_unlock(&info->block_group_cache_lock); + + btrfs_remove_free_space_cache(block_group); down_write(&block_group->space_info->groups_sem); list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); kfree(block_group); + + spin_lock(&info->block_group_cache_lock); } spin_unlock(&info->block_group_cache_lock); return 0; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 65a0583027e..eb3c12e7bea 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2015,6 +2015,8 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); } bdev = em->bdev; block_start = em->block_start; + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) + block_start = EXTENT_MAP_HOLE; free_extent_map(em); em = NULL; @@ -2769,14 +2771,18 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock, struct inode *inode = mapping->host; u64 start = iblock << inode->i_blkbits; sector_t sector = 0; + size_t blksize = (1 << inode->i_blkbits); struct extent_map *em; - em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0); + lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, + GFP_NOFS); + em = get_extent(inode, NULL, 0, start, blksize, 0); + unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, + GFP_NOFS); if (!em || IS_ERR(em)) return 0; - if (em->block_start == EXTENT_MAP_INLINE || - em->block_start == EXTENT_MAP_HOLE) + if (em->block_start > EXTENT_MAP_LAST_BYTE) goto out; sector = (em->block_start + start - em->start) >> inode->i_blkbits; diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index e693e1b4ac4..accfedaeb51 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -12,6 +12,7 @@ #define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */ #define EXTENT_FLAG_COMPRESSED 1 #define EXTENT_FLAG_VACANCY 2 /* no file extent item found */ +#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */ struct extent_map { struct rb_node rb_node; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 1a0510ad030..238a8e215eb 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -381,7 +381,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, int keep; int slot; int bookend; - int 
found_type; + int found_type = 0; int found_extent; int found_inline; int recow; @@ -442,7 +442,8 @@ next_slot: extent); other_encoding = btrfs_file_extent_other_encoding(leaf, extent); - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { extent_end = btrfs_file_extent_disk_bytenr(leaf, extent); @@ -609,8 +610,7 @@ next_slot: */ btrfs_set_file_extent_ram_bytes(leaf, extent, ram_bytes); - btrfs_set_file_extent_type(leaf, extent, - BTRFS_FILE_EXTENT_REG); + btrfs_set_file_extent_type(leaf, extent, found_type); btrfs_mark_buffer_dirty(path->nodes[0]); @@ -661,6 +661,243 @@ out: return ret; } +static int extent_mergeable(struct extent_buffer *leaf, int slot, + u64 objectid, u64 bytenr, u64 *start, u64 *end) +{ + struct btrfs_file_extent_item *fi; + struct btrfs_key key; + u64 extent_end; + + if (slot < 0 || slot >= btrfs_header_nritems(leaf)) + return 0; + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY) + return 0; + + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG || + btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr || + btrfs_file_extent_compression(leaf, fi) || + btrfs_file_extent_encryption(leaf, fi) || + btrfs_file_extent_other_encoding(leaf, fi)) + return 0; + + extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); + if ((*start && *start != key.offset) || (*end && *end != extent_end)) + return 0; + + *start = key.offset; + *end = extent_end; + return 1; +} + +/* + * Mark extent in the range start - end as written. + * + * This changes extent type from 'pre-allocated' to 'regular'. If only + * part of extent is marked as written, the extent will be split into + * two or three. 
+ */ +int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *inode, u64 start, u64 end) +{ + struct extent_buffer *leaf; + struct btrfs_path *path; + struct btrfs_file_extent_item *fi; + struct btrfs_key key; + u64 bytenr; + u64 num_bytes; + u64 extent_end; + u64 extent_offset; + u64 other_start; + u64 other_end; + u64 split = start; + u64 locked_end = end; + int extent_type; + int split_end = 1; + int ret; + + btrfs_drop_extent_cache(inode, start, end - 1, 0); + + path = btrfs_alloc_path(); + BUG_ON(!path); +again: + key.objectid = inode->i_ino; + key.type = BTRFS_EXTENT_DATA_KEY; + if (split == start) + key.offset = split; + else + key.offset = split - 1; + + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret > 0 && path->slots[0] > 0) + path->slots[0]--; + + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + BUG_ON(key.objectid != inode->i_ino || + key.type != BTRFS_EXTENT_DATA_KEY); + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + extent_type = btrfs_file_extent_type(leaf, fi); + BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC); + extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); + BUG_ON(key.offset > start || extent_end < end); + + bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); + num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); + extent_offset = btrfs_file_extent_offset(leaf, fi); + + if (key.offset == start) + split = end; + + if (key.offset == start && extent_end == end) { + int del_nr = 0; + int del_slot = 0; + u64 leaf_owner = btrfs_header_owner(leaf); + u64 leaf_gen = btrfs_header_generation(leaf); + other_start = end; + other_end = 0; + if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino, + bytenr, &other_start, &other_end)) { + extent_end = other_end; + del_slot = path->slots[0] + 1; + del_nr++; + ret = btrfs_free_extent(trans, root, bytenr, num_bytes, + leaf->start, leaf_owner, + leaf_gen, inode->i_ino, 0); + BUG_ON(ret); + } + other_start = 0; + other_end = start; + if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino, + bytenr, &other_start, &other_end)) { + key.offset = other_start; + del_slot = path->slots[0]; + del_nr++; + ret = btrfs_free_extent(trans, root, bytenr, num_bytes, + leaf->start, leaf_owner, + leaf_gen, inode->i_ino, 0); + BUG_ON(ret); + } + split_end = 0; + if (del_nr == 0) { + btrfs_set_file_extent_type(leaf, fi, + BTRFS_FILE_EXTENT_REG); + goto done; + } + + fi = btrfs_item_ptr(leaf, del_slot - 1, + struct btrfs_file_extent_item); + btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); + btrfs_set_file_extent_num_bytes(leaf, fi, + extent_end - key.offset); + btrfs_mark_buffer_dirty(leaf); + + ret = btrfs_del_items(trans, root, path, del_slot, del_nr); + BUG_ON(ret); + goto done; + } else if (split == start) { + if (locked_end < extent_end) { + ret = try_lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, GFP_NOFS); + if (!ret) { + btrfs_release_path(root, path); + lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, GFP_NOFS); + locked_end = extent_end; + goto again; + } + locked_end = extent_end; + } + btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset); + extent_offset += split - key.offset; + } else { + BUG_ON(key.offset != start); + btrfs_set_file_extent_offset(leaf, fi, extent_offset + + split - key.offset); + btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split); + key.offset = split; + btrfs_set_item_key_safe(trans, root, path, 
&key); + extent_end = split; + } + + if (extent_end == end) { + split_end = 0; + extent_type = BTRFS_FILE_EXTENT_REG; + } + if (extent_end == end && split == start) { + other_start = end; + other_end = 0; + if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino, + bytenr, &other_start, &other_end)) { + path->slots[0]++; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + key.offset = split; + btrfs_set_item_key_safe(trans, root, path, &key); + btrfs_set_file_extent_offset(leaf, fi, extent_offset); + btrfs_set_file_extent_num_bytes(leaf, fi, + other_end - split); + goto done; + } + } + if (extent_end == end && split == end) { + other_start = 0; + other_end = start; + if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino, + bytenr, &other_start, &other_end)) { + path->slots[0]--; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - + other_start); + goto done; + } + } + + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + + key.offset = start; + ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi)); + BUG_ON(ret); + + leaf = path->nodes[0]; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_generation(leaf, fi, trans->transid); + btrfs_set_file_extent_type(leaf, fi, extent_type); + btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr); + btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes); + btrfs_set_file_extent_offset(leaf, fi, extent_offset); + btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset); + btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); + btrfs_set_file_extent_compression(leaf, fi, 0); + btrfs_set_file_extent_encryption(leaf, fi, 0); + btrfs_set_file_extent_other_encoding(leaf, fi, 0); + + ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, + leaf->start, root->root_key.objectid, + trans->transid, inode->i_ino); + BUG_ON(ret); +done: + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + if (split_end && split == start) { + split = end; + goto again; + } + if (locked_end > end) { + unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1, + GFP_NOFS); + } + btrfs_free_path(path); + return 0; +} + /* * this gets pages into the page cache and locks them down, it also properly * waits for data=ordered extents to finish before allowing the pages to be diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3e6f0568fdb..789c376157f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -587,7 +588,7 @@ free_pages_out: * blocks on disk */ static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started) + u64 start, u64 end, int *page_started, int force) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; @@ -602,6 +603,7 @@ static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, u64 num_bytes; int extent_type; int ret; + int type; int nocow; int check_prev = 1; @@ -654,7 +656,8 @@ next_slot: struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(leaf, fi); - if (extent_type == BTRFS_FILE_EXTENT_REG) { + if (extent_type == BTRFS_FILE_EXTENT_REG || + extent_type == BTRFS_FILE_EXTENT_PREALLOC) { struct btrfs_block_group_cache *block_group; disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); extent_end = 
found_key.offset + @@ -669,6 +672,8 @@ next_slot: goto out_check; if (disk_bytenr == 0) goto out_check; + if (extent_type == BTRFS_FILE_EXTENT_REG && !force) + goto out_check; if (btrfs_cross_ref_exist(trans, root, disk_bytenr)) goto out_check; block_group = btrfs_lookup_block_group(root->fs_info, @@ -709,10 +714,39 @@ out_check: disk_bytenr += cur_offset - found_key.offset; num_bytes = min(end + 1, extent_end) - cur_offset; + if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { + struct extent_map *em; + struct extent_map_tree *em_tree; + em_tree = &BTRFS_I(inode)->extent_tree; + em = alloc_extent_map(GFP_NOFS); + em->start = cur_offset; + em->len = num_bytes; + em->block_len = num_bytes; + em->block_start = disk_bytenr; + em->bdev = root->fs_info->fs_devices->latest_bdev; + set_bit(EXTENT_FLAG_PINNED, &em->flags); + while (1) { + spin_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + spin_unlock(&em_tree->lock); + if (ret != -EEXIST) { + free_extent_map(em); + break; + } + btrfs_drop_extent_cache(inode, em->start, + em->start + em->len - 1, 0); + } + type = BTRFS_ORDERED_PREALLOC; + } else { + type = BTRFS_ORDERED_NOCOW; + } ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, - num_bytes, num_bytes, - BTRFS_ORDERED_NOCOW); + num_bytes, num_bytes, type); + BUG_ON(ret); + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, + cur_offset, cur_offset + num_bytes - 1, + locked_page, 0, 0, 0); cur_offset = extent_end; if (cur_offset > end) break; @@ -745,7 +779,10 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, if (btrfs_test_opt(root, NODATACOW) || btrfs_test_flag(inode, NODATACOW)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started); + page_started, 0); + else if (btrfs_test_flag(inode, PREALLOC)) + ret = run_delalloc_nocow(inode, locked_page, start, end, + page_started, 1); else ret = cow_file_range(inode, locked_page, start, end, page_started); @@ -1006,6 +1043,63 @@ int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) return -EAGAIN; } +static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, + struct inode *inode, u64 file_pos, + u64 disk_bytenr, u64 disk_num_bytes, + u64 num_bytes, u64 ram_bytes, + u8 compression, u8 encryption, + u16 other_encoding, int extent_type) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_file_extent_item *fi; + struct btrfs_path *path; + struct extent_buffer *leaf; + struct btrfs_key ins; + u64 hint; + int ret; + + path = btrfs_alloc_path(); + BUG_ON(!path); + + ret = btrfs_drop_extents(trans, root, inode, file_pos, + file_pos + num_bytes, file_pos, &hint); + BUG_ON(ret); + + ins.objectid = inode->i_ino; + ins.offset = file_pos; + ins.type = BTRFS_EXTENT_DATA_KEY; + ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); + BUG_ON(ret); + leaf = path->nodes[0]; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_generation(leaf, fi, trans->transid); + btrfs_set_file_extent_type(leaf, fi, extent_type); + btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); + btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); + btrfs_set_file_extent_offset(leaf, fi, 0); + btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); + btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); + btrfs_set_file_extent_compression(leaf, fi, compression); + btrfs_set_file_extent_encryption(leaf, fi, encryption); + btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); + 
btrfs_mark_buffer_dirty(leaf); + + inode_add_bytes(inode, num_bytes); + btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0); + + ins.objectid = disk_bytenr; + ins.offset = disk_num_bytes; + ins.type = BTRFS_EXTENT_ITEM_KEY; + ret = btrfs_alloc_reserved_extent(trans, root, leaf->start, + root->root_key.objectid, + trans->transid, inode->i_ino, &ins); + BUG_ON(ret); + + btrfs_free_path(path); + return 0; +} + /* as ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. @@ -1016,12 +1110,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct btrfs_file_extent_item *extent_item; - struct btrfs_path *path = NULL; - struct extent_buffer *leaf; - u64 alloc_hint = 0; - struct list_head list; - struct btrfs_key ins; + int compressed = 0; int ret; ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); @@ -1035,67 +1124,30 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) goto nocow; - path = btrfs_alloc_path(); - BUG_ON(!path); - lock_extent(io_tree, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, GFP_NOFS); - INIT_LIST_HEAD(&list); - - ret = btrfs_drop_extents(trans, root, inode, - ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len, - ordered_extent->file_offset, &alloc_hint); - BUG_ON(ret); - - ins.objectid = inode->i_ino; - ins.offset = ordered_extent->file_offset; - ins.type = BTRFS_EXTENT_DATA_KEY; - ret = btrfs_insert_empty_item(trans, root, path, &ins, - sizeof(*extent_item)); - BUG_ON(ret); - leaf = path->nodes[0]; - extent_item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_generation(leaf, extent_item, trans->transid); - btrfs_set_file_extent_type(leaf, extent_item, BTRFS_FILE_EXTENT_REG); - btrfs_set_file_extent_disk_bytenr(leaf, extent_item, - ordered_extent->start); - btrfs_set_file_extent_disk_num_bytes(leaf, extent_item, - ordered_extent->disk_len); - btrfs_set_file_extent_offset(leaf, extent_item, 0); - if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) - btrfs_set_file_extent_compression(leaf, extent_item, 1); - else - btrfs_set_file_extent_compression(leaf, extent_item, 0); - btrfs_set_file_extent_encryption(leaf, extent_item, 0); - btrfs_set_file_extent_other_encoding(leaf, extent_item, 0); - - /* ram bytes = extent_num_bytes for now */ - btrfs_set_file_extent_num_bytes(leaf, extent_item, - ordered_extent->len); - btrfs_set_file_extent_ram_bytes(leaf, extent_item, - ordered_extent->len); - btrfs_mark_buffer_dirty(leaf); - - btrfs_drop_extent_cache(inode, ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len - 1, 0); - - ins.objectid = ordered_extent->start; - ins.offset = ordered_extent->disk_len; - ins.type = BTRFS_EXTENT_ITEM_KEY; - ret = btrfs_alloc_reserved_extent(trans, root, leaf->start, - root->root_key.objectid, - trans->transid, inode->i_ino, &ins); - BUG_ON(ret); - btrfs_release_path(root, path); - - inode_add_bytes(inode, ordered_extent->len); + compressed = 1; + if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { + BUG_ON(compressed); + ret = btrfs_mark_extent_written(trans, root, inode, + ordered_extent->file_offset, + 
ordered_extent->file_offset + + ordered_extent->len); + BUG_ON(ret); + } else { + ret = insert_reserved_file_extent(trans, inode, + ordered_extent->file_offset, + ordered_extent->start, + ordered_extent->disk_len, + ordered_extent->len, + ordered_extent->len, + compressed, 0, 0, + BTRFS_FILE_EXTENT_REG); + BUG_ON(ret); + } unlock_extent(io_tree, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, GFP_NOFS); @@ -1115,8 +1167,6 @@ nocow: btrfs_put_ordered_extent(ordered_extent); btrfs_end_transaction(trans, root); - if (path) - btrfs_free_path(path); return 0; } @@ -3488,7 +3538,8 @@ again: found_type = btrfs_file_extent_type(leaf, item); extent_start = found_key.offset; compressed = btrfs_file_extent_compression(leaf, item); - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { extent_end = extent_start + btrfs_file_extent_num_bytes(leaf, item); } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { @@ -3521,7 +3572,8 @@ again: goto not_found_em; } - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { em->start = extent_start; em->len = extent_end - extent_start; bytenr = btrfs_file_extent_disk_bytenr(leaf, item); @@ -3538,6 +3590,8 @@ again: bytenr += btrfs_file_extent_offset(leaf, item); em->block_start = bytenr; em->block_len = em->len; + if (found_type == BTRFS_FILE_EXTENT_PREALLOC) + set_bit(EXTENT_FLAG_PREALLOC, &em->flags); } goto insert; } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { @@ -3969,6 +4023,7 @@ int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry, if (error) return error; + atomic_inc(&inode->i_count); d_instantiate(dentry, inode); return 0; } @@ -4318,6 +4373,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_symlink_inode_operations; inode->i_mapping->a_ops = &btrfs_symlink_aops; inode->i_mapping->backing_dev_info = &root->fs_info->bdi; + inode_set_bytes(inode, name_len); btrfs_i_size_write(inode, name_len - 1); err = btrfs_update_inode(trans, root, inode); if (err) @@ -4335,6 +4391,130 @@ out_fail: return err; } +static int prealloc_file_range(struct inode *inode, u64 start, u64 end, + u64 alloc_hint, int mode) +{ + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_key ins; + u64 alloc_size; + u64 cur_offset = start; + u64 num_bytes = end - start; + int ret = 0; + + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + btrfs_set_trans_block_group(trans, inode); + + while (num_bytes > 0) { + alloc_size = min(num_bytes, root->fs_info->max_extent); + ret = btrfs_reserve_extent(trans, root, alloc_size, + root->sectorsize, 0, alloc_hint, + (u64)-1, &ins, 1); + if (ret) { + WARN_ON(1); + goto out; + } + ret = insert_reserved_file_extent(trans, inode, + cur_offset, ins.objectid, + ins.offset, ins.offset, + ins.offset, 0, 0, 0, + BTRFS_FILE_EXTENT_PREALLOC); + BUG_ON(ret); + num_bytes -= ins.offset; + cur_offset += ins.offset; + alloc_hint = ins.objectid + ins.offset; + } +out: + if (cur_offset > start) { + inode->i_ctime = CURRENT_TIME; + btrfs_set_flag(inode, PREALLOC); + if (!(mode & FALLOC_FL_KEEP_SIZE) && + cur_offset > i_size_read(inode)) + btrfs_i_size_write(inode, cur_offset); + ret = btrfs_update_inode(trans, root, inode); + BUG_ON(ret); + } + + btrfs_end_transaction(trans, root); + return ret; +} + +static long btrfs_fallocate(struct inode *inode, 
int mode, + loff_t offset, loff_t len) +{ + u64 cur_offset; + u64 last_byte; + u64 alloc_start; + u64 alloc_end; + u64 alloc_hint = 0; + u64 mask = BTRFS_I(inode)->root->sectorsize - 1; + struct extent_map *em; + int ret; + + alloc_start = offset & ~mask; + alloc_end = (offset + len + mask) & ~mask; + + mutex_lock(&inode->i_mutex); + if (alloc_start > inode->i_size) { + ret = btrfs_cont_expand(inode, alloc_start); + if (ret) + goto out; + } + + while (1) { + struct btrfs_ordered_extent *ordered; + lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, + alloc_end - 1, GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, + alloc_end - 1); + if (ordered && + ordered->file_offset + ordered->len > alloc_start && + ordered->file_offset < alloc_end) { + btrfs_put_ordered_extent(ordered); + unlock_extent(&BTRFS_I(inode)->io_tree, + alloc_start, alloc_end - 1, GFP_NOFS); + btrfs_wait_ordered_range(inode, alloc_start, + alloc_end - alloc_start); + } else { + if (ordered) + btrfs_put_ordered_extent(ordered); + break; + } + } + + cur_offset = alloc_start; + while (1) { + em = btrfs_get_extent(inode, NULL, 0, cur_offset, + alloc_end - cur_offset, 0); + BUG_ON(IS_ERR(em) || !em); + last_byte = min(extent_map_end(em), alloc_end); + last_byte = (last_byte + mask) & ~mask; + if (em->block_start == EXTENT_MAP_HOLE) { + ret = prealloc_file_range(inode, cur_offset, + last_byte, alloc_hint, mode); + if (ret < 0) { + free_extent_map(em); + break; + } + } + if (em->block_start <= EXTENT_MAP_LAST_BYTE) + alloc_hint = em->block_start; + free_extent_map(em); + + cur_offset = last_byte; + if (cur_offset >= alloc_end) { + ret = 0; + break; + } + } + unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1, + GFP_NOFS); +out: + mutex_unlock(&inode->i_mutex); + return ret; +} + static int btrfs_set_page_dirty(struct page *page) { return __set_page_dirty_nobuffers(page); @@ -4421,6 +4601,7 @@ static struct inode_operations btrfs_file_inode_operations = { .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, .permission = btrfs_permission, + .fallocate = btrfs_fallocate, }; static struct inode_operations btrfs_special_inode_operations = { .getattr = btrfs_getattr, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 7f915d47839..9ff2b4e0e92 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -724,7 +724,8 @@ long btrfs_ioctl_clone(struct file *file, unsigned long src_fd) extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(leaf, extent); - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { u64 ds = btrfs_file_extent_disk_bytenr(leaf, extent); u64 dl = btrfs_file_extent_disk_num_bytes(leaf, diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index e7317c8fda2..370bb428559 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -182,7 +182,7 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->len = len; entry->disk_len = disk_len; entry->inode = inode; - if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_COMPRESSED) + if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) set_bit(type, &entry->flags); /* one ref for the tree */ @@ -339,7 +339,8 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) ordered = list_entry(cur, struct btrfs_ordered_extent, root_extent_list); if (nocow_only && - !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { + 
!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) && + !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { list_move(&ordered->root_extent_list, &root->fs_info->ordered_extents); cond_resched_lock(&root->fs_info->ordered_extent_lock); diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index e6d9bc54c2b..260bf95dfe0 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -68,6 +68,8 @@ struct btrfs_ordered_sum { #define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */ +#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ + struct btrfs_ordered_extent { /* logical offset in the file */ u64 file_offset; @@ -132,7 +134,7 @@ int btrfs_remove_ordered_extent(struct inode *inode, int btrfs_dec_test_ordered_pending(struct inode *inode, u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int type); + u64 start, u64 len, u64 disk_len, int type); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index e0201c3a7dc..be4fc30a30e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -442,7 +442,8 @@ insert: fi = (struct btrfs_file_extent_item *)dst_ptr; extent_type = btrfs_file_extent_type(path->nodes[0], fi); - if (extent_type == BTRFS_FILE_EXTENT_REG) { + if (extent_type == BTRFS_FILE_EXTENT_REG || + extent_type == BTRFS_FILE_EXTENT_PREALLOC) { struct btrfs_key ins; ins.objectid = btrfs_file_extent_disk_bytenr( path->nodes[0], fi); @@ -538,7 +539,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(eb, item); - if (found_type == BTRFS_FILE_EXTENT_REG) + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) extent_end = start + btrfs_file_extent_num_bytes(eb, item); else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size = btrfs_file_extent_inline_len(eb, item); @@ -562,7 +564,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, start, 0); - if (ret == 0 && found_type == BTRFS_FILE_EXTENT_REG) { + if (ret == 0 && + (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC)) { struct btrfs_file_extent_item cmp1; struct btrfs_file_extent_item cmp2; struct btrfs_file_extent_item *existing; @@ -2522,7 +2526,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(src, extent); - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { u64 ds = btrfs_file_extent_disk_bytenr(src, extent); u64 dl = btrfs_file_extent_disk_num_bytes(src, -- cgit v1.2.3-70-g09d2 From 771ed689d2cd53439e28e095bc38fbe40a71429e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 6 Nov 2008 22:02:51 -0500 Subject: Btrfs: Optimize compressed writeback and reads When reading compressed extents, try to put pages into the page cache for any pages covered by the compressed extent that readpages didn't already preload. Add an async work queue to handle transformations at delayed allocation processing time. Right now this is just compression.
The workflow is: 1) Find offsets in the file marked for delayed allocation 2) Lock the pages 3) Lock the state bits 4) Call the async delalloc code The async delalloc code clears the state lock bits and delalloc bits. It is important this happens before the range goes into the work queue because otherwise it might deadlock with other work queue items that try to lock those extent bits. The file pages are compressed, and if the compression doesn't work the pages are written back directly. An ordered work queue is used to make sure the inodes are written in the same order that pdflush or writepages sent them down. This changes extent_write_cache_pages to let the writepage function update the wbc nr_written count. Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 150 ++++++++++- fs/btrfs/ctree.h | 4 +- fs/btrfs/disk-io.c | 27 +- fs/btrfs/extent-tree.c | 6 +- fs/btrfs/extent_io.c | 140 +++++++++-- fs/btrfs/extent_io.h | 13 +- fs/btrfs/file.c | 53 ++-- fs/btrfs/inode.c | 643 +++++++++++++++++++++++++++++++++++++----------- fs/btrfs/ordered-data.c | 13 +- fs/btrfs/super.c | 4 + fs/btrfs/zlib.c | 3 +- 11 files changed, 849 insertions(+), 207 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 354913177ba..284f21025bc 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -145,9 +146,9 @@ static void end_compressed_bio_read(struct bio *bio, int err) } /* do io completion on the original bio */ - if (cb->errors) + if (cb->errors) { bio_io_error(cb->orig_bio); - else + } else bio_endio(cb->orig_bio, 0); /* finally free the cb struct */ @@ -333,6 +334,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, } bytes_left -= PAGE_CACHE_SIZE; first_byte += PAGE_CACHE_SIZE; + cond_resched(); } bio_get(bio); @@ -346,6 +348,130 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, return 0; } +static noinline int add_ra_bio_pages(struct inode *inode, + u64 compressed_end, + struct compressed_bio *cb) +{ + unsigned long end_index; + unsigned long page_index; + u64 last_offset; + u64 isize = i_size_read(inode); + int ret; + struct page *page; + unsigned long nr_pages = 0; + struct extent_map *em; + struct address_space *mapping = inode->i_mapping; + struct pagevec pvec; + struct extent_map_tree *em_tree; + struct extent_io_tree *tree; + u64 end; + int misses = 0; + + page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; + last_offset = (page_offset(page) + PAGE_CACHE_SIZE); + em_tree = &BTRFS_I(inode)->extent_tree; + tree = &BTRFS_I(inode)->io_tree; + + if (isize == 0) + return 0; + + end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + + pagevec_init(&pvec, 0); + while(last_offset < compressed_end) { + page_index = last_offset >> PAGE_CACHE_SHIFT; + + if (page_index > end_index) + break; + + rcu_read_lock(); + page = radix_tree_lookup(&mapping->page_tree, page_index); + rcu_read_unlock(); + if (page) { + misses++; + if (misses > 4) + break; + goto next; + } + + page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS); + if (!page) + break; + + page->index = page_index; + /* + * what we want to do here is call add_to_page_cache_lru, + * but that isn't exported, so we reproduce it here + */ + if (add_to_page_cache(page, mapping, + page->index, GFP_NOFS)) { + page_cache_release(page); + goto next; + } + + /* open coding of lru_cache_add, also not exported */ + 
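/* the reference taken just below belongs to the pagevec; __pagevec_lru_add drops it once the batch is drained onto the LRU */ +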
page_cache_get(page); + if (!pagevec_add(&pvec, page)) + __pagevec_lru_add(&pvec); + + end = last_offset + PAGE_CACHE_SIZE - 1; + /* + * at this point, we have a locked page in the page cache + * for these bytes in the file. But, we have to make + * sure they map to this compressed extent on disk. + */ + set_page_extent_mapped(page); + lock_extent(tree, last_offset, end, GFP_NOFS); + spin_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, last_offset, + PAGE_CACHE_SIZE); + spin_unlock(&em_tree->lock); + + if (!em || last_offset < em->start || + (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || + (em->block_start >> 9) != cb->orig_bio->bi_sector) { + free_extent_map(em); + unlock_extent(tree, last_offset, end, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + break; + } + free_extent_map(em); + + if (page->index == end_index) { + char *userpage; + size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1); + + if (zero_offset) { + int zeros; + zeros = PAGE_CACHE_SIZE - zero_offset; + userpage = kmap_atomic(page, KM_USER0); + memset(userpage + zero_offset, 0, zeros); + flush_dcache_page(page); + kunmap_atomic(userpage, KM_USER0); + } + } + + ret = bio_add_page(cb->orig_bio, page, + PAGE_CACHE_SIZE, 0); + + if (ret == PAGE_CACHE_SIZE) { + nr_pages++; + page_cache_release(page); + } else { + unlock_extent(tree, last_offset, end, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + break; + } +next: + last_offset += PAGE_CACHE_SIZE; + } + if (pagevec_count(&pvec)) + __pagevec_lru_add(&pvec); + return 0; +} + /* * for a compressed read, the bio we get passed has all the inode pages * in it. We don't actually do IO on those pages but allocate new ones @@ -373,6 +499,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, struct block_device *bdev; struct bio *comp_bio; u64 cur_disk_byte = (u64)bio->bi_sector << 9; + u64 em_len; struct extent_map *em; int ret; @@ -393,6 +520,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->start = em->start; compressed_len = em->block_len; + em_len = em->len; free_extent_map(em); cb->len = uncompressed_len; @@ -411,6 +539,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, } cb->nr_pages = nr_pages; + add_ra_bio_pages(inode, cb->start + em_len, cb); + + if (!btrfs_test_opt(root, NODATASUM) && + !btrfs_test_flag(inode, NODATASUM)) { + btrfs_lookup_bio_sums(root, inode, cb->orig_bio); + } + + /* include any pages we added in add_ra_bio_pages */ + uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; + cb->len = uncompressed_len; + comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; @@ -442,9 +581,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); atomic_inc(&cb->pending_bios); - bio->bi_private = cb; - bio->bi_end_io = end_compressed_bio_write; - bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + comp_bio->bi_private = cb; + comp_bio->bi_end_io = end_compressed_bio_read; + + bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0); } cur_disk_byte += PAGE_CACHE_SIZE; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 689df070c8e..c83cc5b2ded 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -625,8 +625,8 @@ struct btrfs_fs_info { struct btrfs_transaction *running_transaction; wait_queue_head_t transaction_throttle; wait_queue_head_t transaction_wait; - wait_queue_head_t async_submit_wait; + 
wait_queue_head_t async_submit_wait; wait_queue_head_t tree_log_wait; struct btrfs_super_block super_copy; @@ -653,6 +653,7 @@ struct btrfs_fs_info { atomic_t nr_async_submits; atomic_t async_submit_draining; atomic_t nr_async_bios; + atomic_t async_delalloc_pages; atomic_t tree_log_writers; atomic_t tree_log_commit; unsigned long tree_log_batch; @@ -677,6 +678,7 @@ struct btrfs_fs_info { * two */ struct btrfs_workers workers; + struct btrfs_workers delalloc_workers; struct btrfs_workers endio_workers; struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e0a28f705a6..8efc123d222 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -539,6 +539,13 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, (atomic_read(&fs_info->nr_async_bios) < limit), HZ/10); } + + while(atomic_read(&fs_info->async_submit_draining) && + atomic_read(&fs_info->nr_async_submits)) { + wait_event(fs_info->async_submit_wait, + (atomic_read(&fs_info->nr_async_submits) == 0)); + } + return 0; } @@ -1437,6 +1444,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->space_info); btrfs_mapping_init(&fs_info->mapping_tree); atomic_set(&fs_info->nr_async_submits, 0); + atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); atomic_set(&fs_info->throttles, 0); @@ -1550,6 +1558,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_init_workers(&fs_info->workers, "worker", fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->delalloc_workers, "delalloc", + fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->submit_workers, "submit", min_t(u64, fs_devices->num_devices, fs_info->thread_pool_size)); @@ -1560,15 +1571,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, */ fs_info->submit_workers.idle_thresh = 64; - /* fs_info->workers is responsible for checksumming file data - * blocks and metadata. Using a larger idle thresh allows each - * worker thread to operate on things in roughly the order they - * were sent by the writeback daemons, improving overall locality - * of the IO going down the pipe. 
- */ - fs_info->workers.idle_thresh = 8; + fs_info->workers.idle_thresh = 16; fs_info->workers.ordered = 1; + fs_info->delalloc_workers.idle_thresh = 2; + fs_info->delalloc_workers.ordered = 1; + btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1); btrfs_init_workers(&fs_info->endio_workers, "endio", fs_info->thread_pool_size); @@ -1584,6 +1592,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->workers, 1); btrfs_start_workers(&fs_info->submit_workers, 1); + btrfs_start_workers(&fs_info->delalloc_workers, 1); btrfs_start_workers(&fs_info->fixup_workers, 1); btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); btrfs_start_workers(&fs_info->endio_write_workers, @@ -1732,6 +1741,7 @@ fail_tree_root: fail_sys_array: fail_sb_buffer: btrfs_stop_workers(&fs_info->fixup_workers); + btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_write_workers); @@ -1988,6 +1998,7 @@ int close_ctree(struct btrfs_root *root) truncate_inode_pages(fs_info->btree_inode->i_mapping, 0); btrfs_stop_workers(&fs_info->fixup_workers); + btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_write_workers); @@ -2062,7 +2073,7 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) struct extent_io_tree *tree; u64 num_dirty; u64 start = 0; - unsigned long thresh = 96 * 1024 * 1024; + unsigned long thresh = 32 * 1024 * 1024; tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; if (current_is_pdflush() || current->flags & PF_MEMALLOC) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8af39521eb7..ebd8275a193 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -768,7 +768,11 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, l = path->nodes[0]; btrfs_item_key_to_cpu(l, &key, path->slots[0]); - BUG_ON(key.objectid != bytenr); + if (key.objectid != bytenr) { + btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]); + printk("wanted %Lu found %Lu\n", bytenr, key.objectid); + BUG(); + } BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY); item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9b37ce6e516..bbe3bcfcf4a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -47,6 +47,11 @@ struct extent_page_data { struct bio *bio; struct extent_io_tree *tree; get_extent_t *get_extent; + + /* tells writepage not to lock the state bits for this range + * it still does the unlocking + */ + int extent_locked; }; int __init extent_io_init(void) @@ -1198,11 +1203,18 @@ static noinline int lock_delalloc_pages(struct inode *inode, * the caller is taking responsibility for * locked_page */ - if (pages[i] != locked_page) + if (pages[i] != locked_page) { lock_page(pages[i]); + if (pages[i]->mapping != inode->i_mapping) { + ret = -EAGAIN; + unlock_page(pages[i]); + page_cache_release(pages[i]); + goto done; + } + } page_cache_release(pages[i]); + pages_locked++; } - pages_locked += ret; nrpages -= ret; index += ret; cond_resched(); @@ -1262,8 +1274,7 @@ again: * if we're looping. 
*/ if (delalloc_end + 1 - delalloc_start > max_bytes && loops) { - delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) & - ~((u64)PAGE_CACHE_SIZE - 1); + delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1; } /* step two, lock all the pages after the page that has start */ ret = lock_delalloc_pages(inode, locked_page, @@ -1306,7 +1317,10 @@ out_failed: int extent_clear_unlock_delalloc(struct inode *inode, struct extent_io_tree *tree, u64 start, u64 end, struct page *locked_page, - int clear_dirty, int set_writeback, + int unlock_pages, + int clear_unlock, + int clear_delalloc, int clear_dirty, + int set_writeback, int end_writeback) { int ret; @@ -1315,12 +1329,19 @@ int extent_clear_unlock_delalloc(struct inode *inode, unsigned long end_index = end >> PAGE_CACHE_SHIFT; unsigned long nr_pages = end_index - index + 1; int i; - int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC; + int clear_bits = 0; + if (clear_unlock) + clear_bits |= EXTENT_LOCKED; if (clear_dirty) clear_bits |= EXTENT_DIRTY; + if (clear_delalloc) + clear_bits |= EXTENT_DELALLOC; + clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS); + if (!(unlock_pages || clear_dirty || set_writeback || end_writeback)) + return 0; while(nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, @@ -1336,7 +1357,8 @@ int extent_clear_unlock_delalloc(struct inode *inode, set_page_writeback(pages[i]); if (end_writeback) end_page_writeback(pages[i]); - unlock_page(pages[i]); + if (unlock_pages) + unlock_page(pages[i]); page_cache_release(pages[i]); } nr_pages -= ret; @@ -1741,9 +1763,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err) } } - if (uptodate) + if (uptodate) { set_extent_uptodate(tree, start, end, GFP_ATOMIC); + } unlock_extent(tree, start, end, GFP_ATOMIC); if (whole_page) { @@ -1925,6 +1948,7 @@ void set_page_extent_mapped(struct page *page) set_page_private(page, EXTENT_PAGE_PRIVATE); } } +EXPORT_SYMBOL(set_page_extent_mapped); void set_page_extent_head(struct page *page, unsigned long len) { @@ -2143,12 +2167,17 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 delalloc_end; int page_started; int compressed; + unsigned long nr_written = 0; WARN_ON(!PageLocked(page)); pg_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || (page->index == end_index && !pg_offset)) { - page->mapping->a_ops->invalidatepage(page, 0); + if (epd->extent_locked) { + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, + page_end, NULL, 1); + } unlock_page(page); return 0; } @@ -2169,27 +2198,33 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, delalloc_start = start; delalloc_end = 0; page_started = 0; - while(delalloc_end < page_end) { - nr_delalloc = find_lock_delalloc_range(inode, tree, + if (!epd->extent_locked) { + while(delalloc_end < page_end) { + nr_delalloc = find_lock_delalloc_range(inode, tree, page, &delalloc_start, &delalloc_end, 128 * 1024 * 1024); - if (nr_delalloc == 0) { + if (nr_delalloc == 0) { + delalloc_start = delalloc_end + 1; + continue; + } + tree->ops->fill_delalloc(inode, page, delalloc_start, + delalloc_end, &page_started, + &nr_written); delalloc_start = delalloc_end + 1; - continue; } - tree->ops->fill_delalloc(inode, page, delalloc_start, - delalloc_end, &page_started); - delalloc_start = delalloc_end + 1; - } - /* did the fill delalloc function already unlock and start the IO? 
*/ - if (page_started) { - return 0; + /* did the fill delalloc function already unlock and start + * the IO? + */ + if (page_started) { + ret = 0; + goto update_nr_written; + } } - lock_extent(tree, start, page_end, GFP_NOFS); + unlock_start = start; if (tree->ops && tree->ops->writepage_start_hook) { @@ -2199,10 +2234,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unlock_extent(tree, start, page_end, GFP_NOFS); redirty_page_for_writepage(wbc, page); unlock_page(page); - return 0; + ret = 0; + goto update_nr_written; } } + nr_written++; + end = page_end; if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { printk("found delalloc bits after lock_extent\n"); @@ -2333,6 +2371,12 @@ done: if (unlock_start <= page_end) unlock_extent(tree, unlock_start, page_end, GFP_NOFS); unlock_page(page); + +update_nr_written: + wbc->nr_to_write -= nr_written; + if (wbc->range_cyclic || (wbc->nr_to_write > 0 && + wbc->range_start == 0 && wbc->range_end == LLONG_MAX)) + page->mapping->writeback_index = page->index + nr_written; return 0; } @@ -2431,7 +2475,7 @@ retry: unlock_page(page); ret = 0; } - if (ret || (--(wbc->nr_to_write) <= 0)) + if (ret || wbc->nr_to_write <= 0) done = 1; if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; @@ -2452,6 +2496,8 @@ retry: } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) + range_whole = 1; if (wbc->range_cont) wbc->range_start = index << PAGE_CACHE_SHIFT; @@ -2469,6 +2515,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, .bio = NULL, .tree = tree, .get_extent = get_extent, + .extent_locked = 0, }; struct writeback_control wbc_writepages = { .bdi = wbc->bdi, @@ -2491,6 +2538,52 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, } EXPORT_SYMBOL(extent_write_full_page); +int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, + u64 start, u64 end, get_extent_t *get_extent, + int mode) +{ + int ret = 0; + struct address_space *mapping = inode->i_mapping; + struct page *page; + unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> + PAGE_CACHE_SHIFT; + + struct extent_page_data epd = { + .bio = NULL, + .tree = tree, + .get_extent = get_extent, + .extent_locked = 1, + }; + struct writeback_control wbc_writepages = { + .bdi = inode->i_mapping->backing_dev_info, + .sync_mode = mode, + .older_than_this = NULL, + .nr_to_write = nr_pages * 2, + .range_start = start, + .range_end = end + 1, + }; + + while(start <= end) { + page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + if (clear_page_dirty_for_io(page)) + ret = __extent_writepage(page, &wbc_writepages, &epd); + else { + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, + start + PAGE_CACHE_SIZE - 1, + NULL, 1); + unlock_page(page); + } + page_cache_release(page); + start += PAGE_CACHE_SIZE; + } + + if (epd.bio) + submit_one_bio(WRITE, epd.bio, 0, 0); + return ret; +} +EXPORT_SYMBOL(extent_write_locked_range); + int extent_writepages(struct extent_io_tree *tree, struct address_space *mapping, @@ -2502,6 +2595,7 @@ int extent_writepages(struct extent_io_tree *tree, .bio = NULL, .tree = tree, .get_extent = get_extent, + .extent_locked = 0, }; ret = extent_write_cache_pages(tree, mapping, wbc, diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 283110ec4ee..2d5f67065b6 100644 --- 
a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -35,7 +35,8 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, unsigned long bio_flags); struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started); + u64 start, u64 end, int *page_started, + unsigned long *nr_written); int (*writepage_start_hook)(struct page *page, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); extent_submit_bio_hook_t *submit_bio_hook; @@ -172,6 +173,9 @@ int extent_invalidatepage(struct extent_io_tree *tree, int extent_write_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, struct writeback_control *wbc); +int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, + u64 start, u64 end, get_extent_t *get_extent, + int mode); int extent_writepages(struct extent_io_tree *tree, struct address_space *mapping, get_extent_t *get_extent, @@ -256,6 +260,9 @@ int extent_range_uptodate(struct extent_io_tree *tree, int extent_clear_unlock_delalloc(struct inode *inode, struct extent_io_tree *tree, u64 start, u64 end, struct page *locked_page, - int clear_dirty, int set_writeback, - int clear_writeback); + int unlock_page, + int clear_unlock, + int clear_delalloc, int clear_dirty, + int set_writeback, + int end_writeback); #endif diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0c8cc35a8b9..337221ecca2 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -368,6 +368,8 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, u64 search_start = start; u64 leaf_start; u64 ram_bytes = 0; + u64 orig_parent = 0; + u64 disk_bytenr = 0; u8 compression; u8 encryption; u16 other_encoding = 0; @@ -500,17 +502,31 @@ next_slot: keep = 1; } - if (bookend && found_extent && locked_end < extent_end) { - ret = try_lock_extent(&BTRFS_I(inode)->io_tree, - locked_end, extent_end - 1, GFP_NOFS); - if (!ret) { - btrfs_release_path(root, path); - lock_extent(&BTRFS_I(inode)->io_tree, - locked_end, extent_end - 1, GFP_NOFS); + if (bookend && found_extent) { + if (locked_end < extent_end) { + ret = try_lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, + GFP_NOFS); + if (!ret) { + btrfs_release_path(root, path); + lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, + GFP_NOFS); + locked_end = extent_end; + continue; + } locked_end = extent_end; - continue; } - locked_end = extent_end; + orig_parent = path->nodes[0]->start; + disk_bytenr = le64_to_cpu(old.disk_bytenr); + if (disk_bytenr != 0) { + ret = btrfs_inc_extent_ref(trans, root, + disk_bytenr, + le64_to_cpu(old.disk_num_bytes), + orig_parent, root->root_key.objectid, + trans->transid, inode->i_ino); + BUG_ON(ret); + } } if (found_inline) { @@ -537,8 +553,12 @@ next_slot: inode_sub_bytes(inode, old_num - new_num); } - btrfs_set_file_extent_num_bytes(leaf, extent, - new_num); + if (!compression && !encryption) { + btrfs_set_file_extent_ram_bytes(leaf, + extent, new_num); + } + btrfs_set_file_extent_num_bytes(leaf, + extent, new_num); btrfs_mark_buffer_dirty(leaf); } else if (key.offset < inline_limit && (end > extent_end) && @@ -582,11 +602,11 @@ next_slot: } /* create bookend, splitting the extent in two */ if (bookend && found_extent) { - u64 disk_bytenr; struct btrfs_key ins; ins.objectid = inode->i_ino; ins.offset = end; btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY); + btrfs_release_path(root, path); ret = btrfs_insert_empty_item(trans, root, path, &ins, 
sizeof(*extent)); @@ -623,14 +643,13 @@ next_slot: btrfs_mark_buffer_dirty(path->nodes[0]); - disk_bytenr = le64_to_cpu(old.disk_bytenr); if (disk_bytenr != 0) { - ret = btrfs_inc_extent_ref(trans, root, - disk_bytenr, - le64_to_cpu(old.disk_num_bytes), - leaf->start, + ret = btrfs_update_extent_ref(trans, root, + disk_bytenr, orig_parent, + leaf->start, root->root_key.objectid, trans->transid, ins.objectid); + BUG_ON(ret); } btrfs_release_path(root, path); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3df0ffad976..e01c0d0310a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -86,6 +86,10 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { static void btrfs_truncate(struct inode *inode); static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); +static noinline int cow_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written, int unlock); /* * a very lame attempt at stopping writes when the FS is 85% full. There @@ -262,35 +266,72 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans, return 0; } +struct async_extent { + u64 start; + u64 ram_size; + u64 compressed_size; + struct page **pages; + unsigned long nr_pages; + struct list_head list; +}; + +struct async_cow { + struct inode *inode; + struct btrfs_root *root; + struct page *locked_page; + u64 start; + u64 end; + struct list_head extents; + struct btrfs_work work; +}; + +static noinline int add_async_extent(struct async_cow *cow, + u64 start, u64 ram_size, + u64 compressed_size, + struct page **pages, + unsigned long nr_pages) +{ + struct async_extent *async_extent; + + async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); + async_extent->start = start; + async_extent->ram_size = ram_size; + async_extent->compressed_size = compressed_size; + async_extent->pages = pages; + async_extent->nr_pages = nr_pages; + list_add_tail(&async_extent->list, &cow->extents); + return 0; +} + /* - * when extent_io.c finds a delayed allocation range in the file, - * the call backs end up in this code. The basic idea is to - * allocate extents on disk for the range, and create ordered data structs - * in ram to track those extents. + * we create compressed extents in two phases. The first + * phase compresses a range of pages that have already been + * locked (both pages and state bits are locked). * - * locked_page is the page that writepage had locked already. We use - * it to make sure we don't do extra locks or unlocks. + * This is done inside an ordered work queue, and the compression + * is spread across many cpus. The actual IO submission is step + * two, and the ordered work queue takes care of making sure that + * happens in the same order things were put onto the queue by + * writepages and friends. * - * *page_started is set to one if we unlock locked_page and do everything - * required to start IO on it. It may be clean and already done with - * IO when we return. + * If this code finds it can't get good compression, it puts an + * entry onto the work queue to write the uncompressed bytes. This + * makes sure that both compressed inodes and uncompressed inodes + * are written in the same order that pdflush sent them down. 
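+ * The extents queued here get their disk allocation and bio + * submission later, in submit_compressed_extents().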
*/ -static int cow_file_range(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started) +static noinline int compress_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, + struct async_cow *async_cow, + int *num_added) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; - u64 alloc_hint = 0; u64 num_bytes; - unsigned long ram_size; u64 orig_start; u64 disk_num_bytes; - u64 cur_alloc_size; u64 blocksize = root->sectorsize; u64 actual_end; - struct btrfs_key ins; - struct extent_map *em; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; struct page **pages = NULL; unsigned long nr_pages; @@ -298,22 +339,12 @@ static int cow_file_range(struct inode *inode, struct page *locked_page, unsigned long total_compressed = 0; unsigned long total_in = 0; unsigned long max_compressed = 128 * 1024; - unsigned long max_uncompressed = 256 * 1024; + unsigned long max_uncompressed = 128 * 1024; int i; - int ordered_type; int will_compress; - trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); - btrfs_set_trans_block_group(trans, inode); orig_start = start; - /* - * compression made this loop a bit ugly, but the basic idea is to - * compress some pages but keep the total size of the compressed - * extent relatively small. If compression is off, this goto target - * is never used. - */ again: will_compress = 0; nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; @@ -324,7 +355,13 @@ again: /* we want to make sure that amount of ram required to uncompress * an extent is reasonable, so we limit the total size in ram - * of a compressed extent to 256k + * of a compressed extent to 128k. This is a crucial number + * because it also controls how easily we can spread reads across + * cpus for decompression. + * + * We also want to make sure the amount of IO required to do + * a random read is reasonably small, so we limit the size of + * a compressed extent to 128k. */ total_compressed = min(total_compressed, max_uncompressed); num_bytes = (end - start + blocksize) & ~(blocksize - 1); @@ -333,18 +370,16 @@ again: total_in = 0; ret = 0; - /* we do compression for mount -o compress and when the - * inode has not been flagged as nocompress + /* + * we do compression for mount -o compress and when the + * inode has not been flagged as nocompress. This flag can + * change at any time if we discover bad compression ratios. */ if (!btrfs_test_flag(inode, NOCOMPRESS) && btrfs_test_opt(root, COMPRESS)) { WARN_ON(pages); pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); - /* we want to make sure the amount of IO required to satisfy - * a random read is reasonably small, so we limit the size - * of a compressed extent to 128k - */ ret = btrfs_zlib_compress_pages(inode->i_mapping, start, total_compressed, pages, nr_pages, &nr_pages_ret, @@ -371,26 +406,34 @@ again: } } if (start == 0) { + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + btrfs_set_trans_block_group(trans, inode); + /* lets try to make an inline extent */ - if (ret || total_in < (end - start + 1)) { + if (ret || total_in < (actual_end - start)) { /* we didn't compress the entire range, try - * to make an uncompressed inline extent. This - * is almost sure to fail, but maybe inline sizes - * will get bigger later + * to make an uncompressed inline extent. 
*/ ret = cow_file_range_inline(trans, root, inode, start, end, 0, NULL); } else { + /* try making a compressed inline extent */ ret = cow_file_range_inline(trans, root, inode, start, end, total_compressed, pages); } + btrfs_end_transaction(trans, root); if (ret == 0) { + /* + * inline extent creation worked, we don't need + * to create any more async work items. Unlock + * and free up our temp pages. + */ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, - start, end, NULL, - 1, 1, 1); - *page_started = 1; + start, end, NULL, 1, 0, + 0, 1, 1, 1); ret = 0; goto free_pages_out; } @@ -435,53 +478,280 @@ again: /* flag the file so we don't compress in the future */ btrfs_set_flag(inode, NOCOMPRESS); } + if (will_compress) { + *num_added += 1; - BUG_ON(disk_num_bytes > - btrfs_super_total_bytes(&root->fs_info->super_copy)); + /* the async work queues will take care of doing actual + * allocation on disk for these compressed pages, + * and will submit them to the elevator. + */ + add_async_extent(async_cow, start, num_bytes, + total_compressed, pages, nr_pages_ret); - btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + if (start + num_bytes < end) { + start += num_bytes; + pages = NULL; + cond_resched(); + goto again; + } + } else { + /* + * No compression, but we still need to write the pages in + * the file we've been given so far. redirty the locked + * page if it corresponds to our extent and set things up + * for the async work queue to run cow_file_range to do + * the normal delalloc dance + */ + if (page_offset(locked_page) >= start && + page_offset(locked_page) <= end) { + __set_page_dirty_nobuffers(locked_page); + /* unlocked later on in the async handlers */ + } + add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0); + *num_added += 1; + } - while(disk_num_bytes > 0) { - unsigned long min_bytes; +out: + return 0; + +free_pages_out: + for (i = 0; i < nr_pages_ret; i++) { + WARN_ON(pages[i]->mapping); + page_cache_release(pages[i]); + } + if (pages) + kfree(pages); + + goto out; +} + +/* + * phase two of compressed writeback. This is the ordered portion + * of the code, which only gets called in the order the work was + * queued. We walk all the async extents created by compress_file_range + * and send them down to the disk. + */ +static noinline int submit_compressed_extents(struct inode *inode, + struct async_cow *async_cow) +{ + struct async_extent *async_extent; + u64 alloc_hint = 0; + struct btrfs_trans_handle *trans; + struct btrfs_key ins; + struct extent_map *em; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_io_tree *io_tree; + int ret; + + if (list_empty(&async_cow->extents)) + return 0; + + trans = btrfs_join_transaction(root, 1); + + while(!list_empty(&async_cow->extents)) { + async_extent = list_entry(async_cow->extents.next, + struct async_extent, list); + list_del(&async_extent->list); + io_tree = &BTRFS_I(inode)->io_tree; + + /* did the compression code fall back to uncompressed IO? 
*/ + if (!async_extent->pages) { + int page_started = 0; + unsigned long nr_written = 0; + + lock_extent(io_tree, async_extent->start, + async_extent->start + async_extent->ram_size - 1, + GFP_NOFS); + + /* allocate blocks */ + cow_file_range(inode, async_cow->locked_page, + async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + &page_started, &nr_written, 0); + + /* + * if page_started, cow_file_range inserted an + * inline extent and took care of all the unlocking + * and IO for us. Otherwise, we need to submit + * all those pages down to the drive. + */ + if (!page_started) + extent_write_locked_range(io_tree, + inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + btrfs_get_extent, + WB_SYNC_ALL); + kfree(async_extent); + cond_resched(); + continue; + } + + lock_extent(io_tree, async_extent->start, + async_extent->start + async_extent->ram_size - 1, + GFP_NOFS); /* - * the max size of a compressed extent is pretty small, - * make the code a little less complex by forcing - * the allocator to find a whole compressed extent at once + * here we're doing allocation and writeback of the + * compressed pages */ - if (will_compress) - min_bytes = disk_num_bytes; - else - min_bytes = root->sectorsize; + btrfs_drop_extent_cache(inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, 0); + + ret = btrfs_reserve_extent(trans, root, + async_extent->compressed_size, + async_extent->compressed_size, + 0, alloc_hint, + (u64)-1, &ins, 1); + BUG_ON(ret); + em = alloc_extent_map(GFP_NOFS); + em->start = async_extent->start; + em->len = async_extent->ram_size; + + em->block_start = ins.objectid; + em->block_len = ins.offset; + em->bdev = root->fs_info->fs_devices->latest_bdev; + set_bit(EXTENT_FLAG_PINNED, &em->flags); + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + + while(1) { + spin_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + spin_unlock(&em_tree->lock); + if (ret != -EEXIST) { + free_extent_map(em); + break; + } + btrfs_drop_extent_cache(inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, 0); + } + + ret = btrfs_add_ordered_extent(inode, async_extent->start, + ins.objectid, + async_extent->ram_size, + ins.offset, + BTRFS_ORDERED_COMPRESSED); + BUG_ON(ret); + + btrfs_end_transaction(trans, root); + + /* + * clear dirty, set writeback and unlock the pages. + */ + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + NULL, 1, 1, 0, 1, 1, 0); + + ret = btrfs_submit_compressed_write(inode, + async_extent->start, + async_extent->ram_size, + ins.objectid, + ins.offset, async_extent->pages, + async_extent->nr_pages); + + BUG_ON(ret); + trans = btrfs_join_transaction(root, 1); + alloc_hint = ins.objectid + ins.offset; + kfree(async_extent); + cond_resched(); + } + + btrfs_end_transaction(trans, root); + return 0; +} + +/* + * when extent_io.c finds a delayed allocation range in the file, + * the call backs end up in this code. The basic idea is to + * allocate extents on disk for the range, and create ordered data structs + * in ram to track those extents. + * + * locked_page is the page that writepage had locked already. We use + * it to make sure we don't do extra locks or unlocks. + * + * *page_started is set to one if we unlock locked_page and do everything + * required to start IO on it. It may be clean and already done with + * IO when we return. 
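+ * + * If unlock is zero the pages in the range are left locked and the + * caller must submit the IO itself; submit_compressed_extents() + * relies on this for its uncompressed fallback path.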
+ */ +static noinline int cow_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written, + int unlock) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + u64 alloc_hint = 0; + u64 num_bytes; + unsigned long ram_size; + u64 disk_num_bytes; + u64 cur_alloc_size; + u64 blocksize = root->sectorsize; + u64 actual_end; + struct btrfs_key ins; + struct extent_map *em; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + int ret = 0; + + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + btrfs_set_trans_block_group(trans, inode); + actual_end = min_t(u64, i_size_read(inode), end + 1); + + num_bytes = (end - start + blocksize) & ~(blocksize - 1); + num_bytes = max(blocksize, num_bytes); + disk_num_bytes = num_bytes; + ret = 0; + + if (start == 0) { + /* lets try to make an inline extent */ + ret = cow_file_range_inline(trans, root, inode, + start, end, 0, NULL); + if (ret == 0) { + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + start, end, NULL, 1, 1, + 1, 1, 1, 1); + *nr_written = *nr_written + + (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; + *page_started = 1; + ret = 0; + goto out; + } + } + + BUG_ON(disk_num_bytes > + btrfs_super_total_bytes(&root->fs_info->super_copy)); + + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + + while(disk_num_bytes > 0) { cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); ret = btrfs_reserve_extent(trans, root, cur_alloc_size, - min_bytes, 0, alloc_hint, + root->sectorsize, 0, alloc_hint, (u64)-1, &ins, 1); if (ret) { - WARN_ON(1); - goto free_pages_out_fail; + BUG(); } em = alloc_extent_map(GFP_NOFS); em->start = start; - if (will_compress) { - ram_size = num_bytes; - em->len = num_bytes; - } else { - /* ramsize == disk size */ - ram_size = ins.offset; - em->len = ins.offset; - } + ram_size = ins.offset; + em->len = ins.offset; em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); - if (will_compress) - set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); - while(1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); @@ -495,10 +765,8 @@ again: } cur_alloc_size = ins.offset; - ordered_type = will_compress ? BTRFS_ORDERED_COMPRESSED : 0; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, - ram_size, cur_alloc_size, - ordered_type); + ram_size, cur_alloc_size, 0); BUG_ON(ret); if (disk_num_bytes < cur_alloc_size) { @@ -506,82 +774,145 @@ again: cur_alloc_size); break; } - - if (will_compress) { - /* - * we're doing compression, we and we need to - * submit the compressed extents down to the device. - * - * We lock down all the file pages, clearing their - * dirty bits and setting them writeback. Everyone - * that wants to modify the page will wait on the - * ordered extent above. 
- * - * The writeback bits on the file pages are - * cleared when the compressed pages are on disk - */ - btrfs_end_transaction(trans, root); - - if (start <= page_offset(locked_page) && - page_offset(locked_page) < start + ram_size) { - *page_started = 1; - } - - extent_clear_unlock_delalloc(inode, - &BTRFS_I(inode)->io_tree, - start, - start + ram_size - 1, - NULL, 1, 1, 0); - - ret = btrfs_submit_compressed_write(inode, start, - ram_size, ins.objectid, - cur_alloc_size, pages, - nr_pages_ret); - - BUG_ON(ret); - trans = btrfs_join_transaction(root, 1); - if (start + ram_size < end) { - start += ram_size; - alloc_hint = ins.objectid + ins.offset; - /* pages will be freed at end_bio time */ - pages = NULL; - goto again; - } else { - /* we've written everything, time to go */ - break; - } - } /* we're not doing compressed IO, don't unlock the first * page (which the caller expects to stay locked), don't * clear any dirty bits and don't set any writeback bits */ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, start, start + ram_size - 1, - locked_page, 0, 0, 0); + locked_page, unlock, 1, + 1, 0, 0, 0); disk_num_bytes -= cur_alloc_size; num_bytes -= cur_alloc_size; alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; } - - ret = 0; out: + ret = 0; btrfs_end_transaction(trans, root); return ret; +} -free_pages_out_fail: - extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, - start, end, locked_page, 0, 0, 0); -free_pages_out: - for (i = 0; i < nr_pages_ret; i++) { - WARN_ON(pages[i]->mapping); - page_cache_release(pages[i]); +/* + * work queue call back to start compression on a file and pages + */ +static noinline void async_cow_start(struct btrfs_work *work) +{ + struct async_cow *async_cow; + int num_added = 0; + async_cow = container_of(work, struct async_cow, work); + + compress_file_range(async_cow->inode, async_cow->locked_page, + async_cow->start, async_cow->end, async_cow, + &num_added); + if (num_added == 0) + async_cow->inode = NULL; +} + +/* + * work queue call back to submit previously compressed pages + */ +static noinline void async_cow_submit(struct btrfs_work *work) +{ + struct async_cow *async_cow; + struct btrfs_root *root; + unsigned long nr_pages; + + async_cow = container_of(work, struct async_cow, work); + + root = async_cow->root; + nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> + PAGE_CACHE_SHIFT; + + atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); + + if (atomic_read(&root->fs_info->async_delalloc_pages) < + 5 * 1024 * 1024 && + waitqueue_active(&root->fs_info->async_submit_wait)) + wake_up(&root->fs_info->async_submit_wait); + + if (async_cow->inode) { + submit_compressed_extents(async_cow->inode, async_cow); } - if (pages) - kfree(pages); +} - goto out; +static noinline void async_cow_free(struct btrfs_work *work) +{ + struct async_cow *async_cow; + async_cow = container_of(work, struct async_cow, work); + kfree(async_cow); +} + +static int cow_file_range_async(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written) +{ + struct async_cow *async_cow; + struct btrfs_root *root = BTRFS_I(inode)->root; + unsigned long nr_pages; + u64 cur_end; + int limit = 10 * 1024 * 1024; + + if (!btrfs_test_opt(root, COMPRESS)) { + return cow_file_range(inode, locked_page, start, end, + page_started, nr_written, 1); + } + + clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED | + EXTENT_DELALLOC, 1, 0, GFP_NOFS); + while(start < end) { + 
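/* each pass carves off one chunk of the range (at most 512K when compression is enabled) and queues it as an async_cow work item; compression then runs on many cpus while the ordered work queue preserves submission order */ +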
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); + async_cow->inode = inode; + async_cow->root = root; + async_cow->locked_page = locked_page; + async_cow->start = start; + + if (btrfs_test_flag(inode, NOCOMPRESS)) + cur_end = end; + else + cur_end = min(end, start + 512 * 1024 - 1); + + async_cow->end = cur_end; + INIT_LIST_HEAD(&async_cow->extents); + + async_cow->work.func = async_cow_start; + async_cow->work.ordered_func = async_cow_submit; + async_cow->work.ordered_free = async_cow_free; + async_cow->work.flags = 0; + + while(atomic_read(&root->fs_info->async_submit_draining) && + atomic_read(&root->fs_info->async_delalloc_pages)) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->async_delalloc_pages) + == 0)); + } + + nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> + PAGE_CACHE_SHIFT; + atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); + + btrfs_queue_worker(&root->fs_info->delalloc_workers, + &async_cow->work); + + if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->async_delalloc_pages) < + limit)); + } + + while(atomic_read(&root->fs_info->async_submit_draining) && + atomic_read(&root->fs_info->async_delalloc_pages)) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->async_delalloc_pages) == + 0)); + } + + *nr_written += nr_pages; + start = cur_end + 1; + } + *page_started = 1; + return 0; } /* @@ -592,7 +923,8 @@ free_pages_out: * blocks on disk */ static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started, int force) + u64 start, u64 end, int *page_started, int force, + unsigned long *nr_written) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; @@ -711,7 +1043,8 @@ out_check: btrfs_release_path(root, path); if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, - found_key.offset - 1, page_started); + found_key.offset - 1, page_started, + nr_written, 1); BUG_ON(ret); cow_start = (u64)-1; } @@ -748,9 +1081,10 @@ out_check: ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, num_bytes, num_bytes, type); BUG_ON(ret); + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, cur_offset, cur_offset + num_bytes - 1, - locked_page, 0, 0, 0); + locked_page, 1, 1, 1, 0, 0, 0); cur_offset = extent_end; if (cur_offset > end) break; @@ -761,7 +1095,7 @@ out_check: cow_start = cur_offset; if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, end, - page_started); + page_started, nr_written, 1); BUG_ON(ret); } @@ -775,7 +1109,8 @@ out_check: * extent_io.c call back to do delayed allocation processing */ static int run_delalloc_range(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started) + u64 start, u64 end, int *page_started, + unsigned long *nr_written) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; @@ -783,13 +1118,13 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, if (btrfs_test_opt(root, NODATACOW) || btrfs_test_flag(inode, NODATACOW)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 0); + page_started, 0, nr_written); else if (btrfs_test_flag(inode, PREALLOC)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 1); + page_started, 1, nr_written); else - ret = cow_file_range(inode, locked_page, start, end, - page_started); + ret = 
cow_file_range_async(inode, locked_page, start, end, + page_started, nr_written); return ret; } @@ -861,6 +1196,9 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, u64 map_length; int ret; + if (bio_flags & EXTENT_BIO_COMPRESSED) + return 0; + length = bio->bi_size; map_tree = &root->fs_info->mapping_tree; map_length = length; @@ -925,12 +1263,12 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, btrfs_test_flag(inode, NODATASUM); if (!(rw & (1 << BIO_RW))) { - if (!skip_sum) - btrfs_lookup_bio_sums(root, inode, bio); if (bio_flags & EXTENT_BIO_COMPRESSED) return btrfs_submit_compressed_read(inode, bio, mirror_num, bio_flags); + else if (!skip_sum) + btrfs_lookup_bio_sums(root, inode, bio); goto mapit; } else if (!skip_sum) { /* we're doing a write, do the async checksumming */ @@ -966,6 +1304,9 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) { + if ((end & (PAGE_CACHE_SIZE - 1)) == 0) { + WARN_ON(1); + } return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); } @@ -2105,6 +2446,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int pending_del_nr = 0; int pending_del_slot = 0; int extent_type = -1; + int encoding; u64 mask = root->sectorsize - 1; if (root->ref_cows) @@ -2144,6 +2486,7 @@ search_again: leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); found_type = btrfs_key_type(&found_key); + encoding = 0; if (found_key.objectid != inode->i_ino) break; @@ -2156,6 +2499,10 @@ search_again: fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(leaf, fi); + encoding = btrfs_file_extent_compression(leaf, fi); + encoding |= btrfs_file_extent_encryption(leaf, fi); + encoding |= btrfs_file_extent_other_encoding(leaf, fi); + if (extent_type != BTRFS_FILE_EXTENT_INLINE) { item_end += btrfs_file_extent_num_bytes(leaf, fi); @@ -2200,7 +2547,7 @@ search_again: if (extent_type != BTRFS_FILE_EXTENT_INLINE) { u64 num_dec; extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); - if (!del_item) { + if (!del_item && !encoding) { u64 orig_num_bytes = btrfs_file_extent_num_bytes(leaf, fi); extent_num_bytes = new_size - @@ -2436,7 +2783,14 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) last_byte = min(extent_map_end(em), block_end); last_byte = (last_byte + mask) & ~mask; if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { + u64 hint_byte = 0; hole_size = last_byte - cur_offset; + err = btrfs_drop_extents(trans, root, inode, + cur_offset, + cur_offset + hole_size, + cur_offset, &hint_byte); + if (err) + break; err = btrfs_insert_file_extent(trans, root, inode->i_ino, cur_offset, 0, 0, hole_size, 0, hole_size, @@ -3785,6 +4139,7 @@ int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct extent_io_tree *tree; + tree = &BTRFS_I(mapping->host)->io_tree; return extent_writepages(tree, mapping, btrfs_get_extent, wbc); } @@ -4285,9 +4640,11 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) * ordered extents get created before we return */ atomic_inc(&root->fs_info->async_submit_draining); - while(atomic_read(&root->fs_info->nr_async_submits)) { + while(atomic_read(&root->fs_info->nr_async_submits) || + atomic_read(&root->fs_info->async_delalloc_pages)) { wait_event(root->fs_info->async_submit_wait, - (atomic_read(&root->fs_info->nr_async_submits) == 0)); + 
(atomic_read(&root->fs_info->nr_async_submits) == 0 && + atomic_read(&root->fs_info->async_delalloc_pages) == 0)); } atomic_dec(&root->fs_info->async_submit_draining); return 0; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 370bb428559..027ad6b3839 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -390,7 +390,7 @@ void btrfs_start_ordered_extent(struct inode *inode, * start IO on any dirty ones so the wait doesn't stall waiting * for pdflush to find them */ - btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE); + btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL); if (wait) { wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); @@ -421,6 +421,12 @@ again: */ btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE); + /* The compression code will leave pages locked but return from + * writepage without setting the page writeback. Starting again + * with WB_SYNC_ALL will end up waiting for the IO to actually start. + */ + btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL); + btrfs_wait_on_page_writeback_range(inode->i_mapping, start >> PAGE_CACHE_SHIFT, orig_end >> PAGE_CACHE_SHIFT); @@ -448,10 +454,7 @@ again: } if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end, EXTENT_ORDERED | EXTENT_DELALLOC, 0)) { - printk("inode %lu still ordered or delalloc after wait " - "%llu %llu\n", inode->i_ino, - (unsigned long long)start, - (unsigned long long)orig_end); + schedule_timeout(1); goto again; } return 0; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 431fdf144b5..ab9d5e89ed1 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -375,6 +375,10 @@ int btrfs_sync_fs(struct super_block *sb, int wait) filemap_flush(root->fs_info->btree_inode->i_mapping); return 0; } + + btrfs_start_delalloc_inodes(root); + btrfs_wait_ordered_extents(root, 0); + btrfs_clean_old_snapshots(root); trans = btrfs_start_transaction(root, 1); ret = btrfs_commit_transaction(trans, root); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index e99309180a1..ba2527d0873 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -423,8 +423,9 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, /* we didn't make progress in this inflate * call, we're done */ - if (ret != Z_STREAM_END) + if (ret != Z_STREAM_END) { ret = -1; + } break; } -- cgit v1.2.3-70-g09d2 From d20f7043fa65659136c1a7c3c456eeeb5c6f431f Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 8 Dec 2008 16:58:54 -0500 Subject: Btrfs: move data checksumming into a dedicated tree Btrfs stores checksums for each data block. Until now, they have been stored in the subvolume trees, indexed by the inode that is referencing the data block. This means that when we read the inode, we've probably read in at least some checksums as well. But, this has a few problems: * The checksums are indexed by logical offset in the file. When compression is on, this means we have to do the expensive checksumming on the uncompressed data. It would be faster if we could checksum the compressed data instead. * If we implement encryption, we'll be checksumming the plain text and storing that on disk. This is significantly less secure. * For either compression or encryption, we have to get the plain text back before we can verify the checksum as correct. This makes the raid layer balancing and extent moving much more expensive. * It makes the front end caching code more complex, as we have to touch the subvolume and inodes as we cache extents.
* There is potentially one copy of the checksum in each subvolume referencing an extent. The solution used here is to store the extent checksums in a dedicated tree. This allows us to index the checksums by physical extent start and length. It means: * The checksum is against the data stored on disk, after any compression or encryption is done. * The checksum is stored in a central location, and can be verified without following back references, or reading inodes. This makes compression significantly faster by reducing the amount of data that needs to be checksummed. It will also allow much faster raid management code in general. The checksums are indexed by a key with a fixed objectid (a magic value in ctree.h) and offset set to the starting byte of the extent (a short keying sketch follows after the first compression.c hunks below). This allows us to copy the checksum items into the fsync log tree directly (or any other tree), without having to invent a second format for them. Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 124 ++++++++++++++++++++++++++++---- fs/btrfs/ctree.h | 30 +++++--- fs/btrfs/disk-io.c | 45 ++++++++++-- fs/btrfs/extent_io.c | 5 ++ fs/btrfs/file-item.c | 185 ++++++++++++++++++------------------------ fs/btrfs/inode.c | 45 ++++++------ fs/btrfs/ioctl.c | 55 +------------- fs/btrfs/ordered-data.c | 7 +- fs/btrfs/ordered-data.h | 10 +-- fs/btrfs/tree-log.c | 121 +++++++++++++++++++++++++++---- fs/btrfs/volumes.c | 1 + 11 files changed, 387 insertions(+), 241 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 4febe2eb0b8..ad727413730 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -69,11 +69,27 @@ struct compressed_bio { /* IO errors */ int errors; + int mirror_num; /* for reads, this is the bio we are copying the data into */ struct bio *orig_bio; + + /* + * the start of a variable length array of checksums only + * used by reads + */ + u32 sums; }; +static inline int compressed_bio_size(struct btrfs_root *root, + unsigned long disk_size) +{ + u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy); + return sizeof(struct compressed_bio) + + ((disk_size + root->sectorsize - 1) / root->sectorsize) * + csum_size; +} + static struct bio *compressed_bio_alloc(struct block_device *bdev, u64 first_byte, gfp_t gfp_flags) { @@ -96,6 +112,47 @@ static struct bio *compressed_bio_alloc(struct block_device *bdev, return bio; } +static int check_compressed_csum(struct inode *inode, + struct compressed_bio *cb, + u64 disk_start) +{ + int ret; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct page *page; + unsigned long i; + char *kaddr; + u32 csum; + u32 *cb_sum = &cb->sums; + + if (btrfs_test_opt(root, NODATASUM) || + btrfs_test_flag(inode, NODATASUM)) + return 0; + + for (i = 0; i < cb->nr_pages; i++) { + page = cb->compressed_pages[i]; + csum = ~(u32)0; + + kaddr = kmap_atomic(page, KM_USER0); + csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE); + btrfs_csum_final(csum, (char *)&csum); + kunmap_atomic(kaddr, KM_USER0); + + if (csum != *cb_sum) { + printk("btrfs csum failed ino %lu extent %llu csum %u " + "wanted %u mirror %d\n", inode->i_ino, + (unsigned long long)disk_start, + csum, *cb_sum, cb->mirror_num); + ret = -EIO; + goto fail; + } + cb_sum++; + + } + ret = 0; +fail: + return ret; +} + /* when we finish reading compressed pages from the disk, we * decompress them and then run the bio end_io routines on the * decompressed pages (in the inode address space).
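/*
 * Editorial sketch, not part of the patch: how the new csum items are
 * keyed, per the commit message above. Every csum item shares one fixed
 * objectid (BTRFS_EXTENT_CSUM_OBJECTID, -10ULL in the ctree.h hunk) and
 * uses the physical disk bytenr of the extent as the key offset, which is
 * what lets the same items be copied into the log tree unchanged. The
 * helper name is made up for illustration.
 */
static struct btrfs_key example_csum_key(u64 disk_bytenr)
{
	struct btrfs_key key;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;	/* 128, replaces BTRFS_CSUM_ITEM_KEY */
	key.offset = disk_bytenr;		/* physical start byte on disk */
	return key;
}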
@@ -124,16 +181,21 @@ static void end_compressed_bio_read(struct bio *bio, int err) if (!atomic_dec_and_test(&cb->pending_bios)) goto out; + inode = cb->inode; + ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9); + if (ret) + goto csum_failed; + /* ok, we're the last bio for this extent, lets start * the decompression. */ - inode = cb->inode; tree = &BTRFS_I(inode)->io_tree; ret = btrfs_zlib_decompress_biovec(cb->compressed_pages, cb->start, cb->orig_bio->bi_io_vec, cb->orig_bio->bi_vcnt, cb->compressed_len); +csum_failed: if (ret) cb->errors = 1; @@ -148,8 +210,21 @@ static void end_compressed_bio_read(struct bio *bio, int err) /* do io completion on the original bio */ if (cb->errors) { bio_io_error(cb->orig_bio); - } else + } else { + int bio_index = 0; + struct bio_vec *bvec = cb->orig_bio->bi_io_vec; + + /* + * we have verified the checksum already, set page + * checked so the end_io handlers know about it + */ + while(bio_index < cb->orig_bio->bi_vcnt) { + SetPageChecked(bvec->bv_page); + bvec++; + bio_index++; + } bio_endio(cb->orig_bio, 0); + } /* finally free the cb struct */ kfree(cb->compressed_pages); @@ -277,12 +352,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, int ret; WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); - cb = kmalloc(sizeof(*cb), GFP_NOFS); + cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; cb->start = start; cb->len = len; + cb->mirror_num = 0; cb->compressed_pages = compressed_pages; cb->compressed_len = compressed_len; cb->orig_bio = NULL; @@ -290,9 +366,6 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; - ret = btrfs_csum_file_bytes(root, inode, start, len); - BUG_ON(ret); - bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; @@ -325,6 +398,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); + ret = btrfs_csum_one_bio(root, inode, bio, start, 1); + BUG_ON(ret); + ret = btrfs_map_bio(root, WRITE, bio, 0, 1); BUG_ON(ret); @@ -348,6 +424,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); + ret = btrfs_csum_one_bio(root, inode, bio, start, 1); + BUG_ON(ret); + ret = btrfs_map_bio(root, WRITE, bio, 0, 1); BUG_ON(ret); @@ -510,6 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, u64 em_start; struct extent_map *em; int ret; + u32 *sums; tree = &BTRFS_I(inode)->io_tree; em_tree = &BTRFS_I(inode)->extent_tree; @@ -521,15 +601,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, PAGE_CACHE_SIZE); spin_unlock(&em_tree->lock); - cb = kmalloc(sizeof(*cb), GFP_NOFS); + compressed_len = em->block_len; + cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; + cb->mirror_num = mirror_num; + sums = &cb->sums; cb->start = em->orig_start; - compressed_len = em->block_len; em_len = em->len; em_start = em->start; + free_extent_map(em); em = NULL; @@ -551,11 +634,6 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, add_ra_bio_pages(inode, em_start + em_len, cb); - if (!btrfs_test_opt(root, NODATASUM) && - !btrfs_test_flag(inode, NODATASUM)) { - btrfs_lookup_bio_sums(root, inode, 
cb->orig_bio); - } - /* include any pages we added in add_ra-bio_pages */ uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; cb->len = uncompressed_len; @@ -568,6 +646,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, for (page_index = 0; page_index < nr_pages; page_index++) { page = cb->compressed_pages[page_index]; page->mapping = inode->i_mapping; + page->index = em_start >> PAGE_CACHE_SHIFT; + if (comp_bio->bi_size) ret = tree->ops->merge_bio_hook(page, 0, PAGE_CACHE_SIZE, @@ -591,7 +671,16 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, */ atomic_inc(&cb->pending_bios); - ret = btrfs_map_bio(root, READ, comp_bio, 0, 0); + if (!btrfs_test_opt(root, NODATASUM) && + !btrfs_test_flag(inode, NODATASUM)) { + btrfs_lookup_bio_sums(root, inode, comp_bio, + sums); + } + sums += (comp_bio->bi_size + root->sectorsize - 1) / + root->sectorsize; + + ret = btrfs_map_bio(root, READ, comp_bio, + mirror_num, 0); BUG_ON(ret); bio_put(comp_bio); @@ -610,7 +699,12 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); BUG_ON(ret); - ret = btrfs_map_bio(root, READ, comp_bio, 0, 0); + if (!btrfs_test_opt(root, NODATASUM) && + !btrfs_test_flag(inode, NODATASUM)) { + btrfs_lookup_bio_sums(root, inode, comp_bio, sums); + } + + ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); BUG_ON(ret); bio_put(comp_bio); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 96f2ec7ad5b..242b961ae6d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -73,6 +73,9 @@ struct btrfs_ordered_sum; /* directory objectid inside the root tree */ #define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL +/* holds checksums of all the data extents */ +#define BTRFS_CSUM_TREE_OBJECTID 7ULL + /* orhpan objectid for tracking unlinked/truncated files */ #define BTRFS_ORPHAN_OBJECTID -5ULL @@ -84,6 +87,13 @@ struct btrfs_ordered_sum; #define BTRFS_TREE_RELOC_OBJECTID -8ULL #define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL +/* + * extent checksums all have this objectid + * this allows them to share the logging tree + * for fsyncs + */ +#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL + /* dummy objectid represents multiple objectids */ #define BTRFS_MULTIPLE_OBJECTIDS -255ULL @@ -634,6 +644,7 @@ struct btrfs_fs_info { struct btrfs_root *chunk_root; struct btrfs_root *dev_root; struct btrfs_root *fs_root; + struct btrfs_root *csum_root; /* the log root tree is a directory of all the other log roots */ struct btrfs_root *log_root_tree; @@ -716,6 +727,7 @@ struct btrfs_fs_info { struct btrfs_workers workers; struct btrfs_workers delalloc_workers; struct btrfs_workers endio_workers; + struct btrfs_workers endio_meta_workers; struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; /* @@ -858,13 +870,12 @@ struct btrfs_root { * extent data is for file data */ #define BTRFS_EXTENT_DATA_KEY 108 + /* - * csum items have the checksums for data in the extents + * extent csums are stored in a separate tree and hold csums for + * an entire extent on disk. */ -#define BTRFS_CSUM_ITEM_KEY 120 - - -/* reserve 21-31 for other file/dir stuff */ +#define BTRFS_EXTENT_CSUM_KEY 128 /* * root items point to tree roots. 
There are typically in the root @@ -1917,7 +1928,7 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root /* file-item.c */ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, - struct bio *bio); + struct bio *bio, u32 *dst); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, @@ -1929,17 +1940,16 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, struct btrfs_path *path, u64 objectid, u64 bytenr, int mod); int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, - struct bio *bio); + struct bio *bio, u64 file_start, int contig); int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, u64 start, unsigned long len); struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - u64 objectid, u64 offset, - int cow); + u64 bytenr, int cow); int btrfs_csum_truncate(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 isize); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3eb7c2576fe..61dc3b2c834 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -445,11 +445,18 @@ static void end_workqueue_bio(struct bio *bio, int err) end_io_wq->error = err; end_io_wq->work.func = end_workqueue_fn; end_io_wq->work.flags = 0; - if (bio->bi_rw & (1 << BIO_RW)) + + if (bio->bi_rw & (1 << BIO_RW)) { btrfs_queue_worker(&fs_info->endio_write_workers, &end_io_wq->work); - else - btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); + } else { + if (end_io_wq->metadata) + btrfs_queue_worker(&fs_info->endio_meta_workers, + &end_io_wq->work); + else + btrfs_queue_worker(&fs_info->endio_workers, + &end_io_wq->work); + } } int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, @@ -1208,6 +1215,9 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) info = (struct btrfs_fs_info *)bdi->unplug_io_data; list_for_each(cur, &info->fs_devices->devices) { device = list_entry(cur, struct btrfs_device, dev_list); + if (!device->bdev) + continue; + bdi = blk_get_backing_dev_info(device->bdev); if (bdi->unplug_io_fn) { bdi->unplug_io_fn(bdi, page); @@ -1344,7 +1354,7 @@ static void end_workqueue_fn(struct btrfs_work *work) * blocksize <= pagesize, it is basically a noop */ if (end_io_wq->metadata && !bio_ready_for_csum(bio)) { - btrfs_queue_worker(&fs_info->endio_workers, + btrfs_queue_worker(&fs_info->endio_meta_workers, &end_io_wq->work); return; } @@ -1454,6 +1464,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct buffer_head *bh; struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); + struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), + GFP_NOFS); struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info), @@ -1470,7 +1482,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_super_block *disk_super; if (!extent_root || !tree_root || !fs_info || - !chunk_root || !dev_root) { + !chunk_root || !dev_root || !csum_root) { err = -ENOMEM; goto fail; } @@ -1487,6 +1499,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; 
fs_info->extent_root = extent_root; + fs_info->csum_root = csum_root; fs_info->chunk_root = chunk_root; fs_info->dev_root = dev_root; fs_info->fs_devices = fs_devices; @@ -1652,6 +1665,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1); btrfs_init_workers(&fs_info->endio_workers, "endio", fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", + fs_info->thread_pool_size); btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", fs_info->thread_pool_size); @@ -1667,6 +1682,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->delalloc_workers, 1); btrfs_start_workers(&fs_info->fixup_workers, 1); btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); + btrfs_start_workers(&fs_info->endio_meta_workers, + fs_info->thread_pool_size); btrfs_start_workers(&fs_info->endio_write_workers, fs_info->thread_pool_size); @@ -1751,6 +1768,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (ret) goto fail_extent_root; + ret = find_and_setup_root(tree_root, fs_info, + BTRFS_CSUM_TREE_OBJECTID, csum_root); + if (ret) + goto fail_extent_root; + + csum_root->track_dirty = 1; + btrfs_read_block_groups(extent_root); fs_info->generation = generation + 1; @@ -1761,7 +1785,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, "btrfs-cleaner"); if (!fs_info->cleaner_kthread) - goto fail_extent_root; + goto fail_csum_root; fs_info->transaction_kthread = kthread_run(transaction_kthread, tree_root, @@ -1825,6 +1849,8 @@ fail_cleaner: filemap_write_and_wait(fs_info->btree_inode->i_mapping); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); +fail_csum_root: + free_extent_buffer(csum_root->node); fail_extent_root: free_extent_buffer(extent_root->node); fail_tree_root: @@ -1838,6 +1864,7 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); fail_iput: @@ -1853,6 +1880,7 @@ fail: kfree(fs_info); kfree(chunk_root); kfree(dev_root); + kfree(csum_root); return ERR_PTR(err); } @@ -2131,6 +2159,9 @@ int close_ctree(struct btrfs_root *root) if (root->fs_info->dev_root->node); free_extent_buffer(root->fs_info->dev_root->node); + if (root->fs_info->csum_root->node); + free_extent_buffer(root->fs_info->csum_root->node); + btrfs_free_block_groups(root->fs_info); del_fs_roots(fs_info); @@ -2141,6 +2172,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); @@ -2163,6 +2195,7 @@ int close_ctree(struct btrfs_root *root) kfree(fs_info->tree_root); kfree(fs_info->chunk_root); kfree(fs_info->dev_root); + kfree(fs_info->csum_root); return 0; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c3dfe2a0ec8..7449ecf32c5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1732,6 +1732,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err) int whole_page; int ret; + if (err) + uptodate = 0; + do { struct page *page = bvec->bv_page; tree = 
&BTRFS_I(page->mapping->host)->io_tree; @@ -1761,6 +1764,8 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (ret == 0) { uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + if (err) + uptodate = 0; continue; } } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 234ed441736..a3ad2ce0011 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -74,8 +74,7 @@ out: struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - u64 objectid, u64 offset, - int cow) + u64 bytenr, int cow) { int ret; struct btrfs_key file_key; @@ -87,9 +86,9 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, btrfs_super_csum_size(&root->fs_info->super_copy); int csums_in_item; - file_key.objectid = objectid; - file_key.offset = offset; - btrfs_set_key_type(&file_key, BTRFS_CSUM_ITEM_KEY); + file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; + file_key.offset = bytenr; + btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY); ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow); if (ret < 0) goto fail; @@ -100,11 +99,10 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, goto fail; path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (btrfs_key_type(&found_key) != BTRFS_CSUM_ITEM_KEY || - found_key.objectid != objectid) { + if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY) goto fail; - } - csum_offset = (offset - found_key.offset) >> + + csum_offset = (bytenr - found_key.offset) >> root->fs_info->sb->s_blocksize_bits; csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); csums_in_item /= csum_size; @@ -143,7 +141,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, } int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, - struct bio *bio) + struct bio *bio, u32 *dst) { u32 sum; struct bio_vec *bvec = bio->bi_io_vec; @@ -151,6 +149,7 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, u64 offset; u64 item_start_offset = 0; u64 item_last_offset = 0; + u64 disk_bytenr; u32 diff; u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy); @@ -165,21 +164,22 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, WARN_ON(bio->bi_vcnt <= 0); + disk_bytenr = (u64)bio->bi_sector << 9; while(bio_index < bio->bi_vcnt) { offset = page_offset(bvec->bv_page) + bvec->bv_offset; - ret = btrfs_find_ordered_sum(inode, offset, &sum); + ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum); if (ret == 0) goto found; - if (!item || offset < item_start_offset || - offset >= item_last_offset) { + if (!item || disk_bytenr < item_start_offset || + disk_bytenr >= item_last_offset) { struct btrfs_key found_key; u32 item_size; if (item) btrfs_release_path(root, path); - item = btrfs_lookup_csum(NULL, root, path, - inode->i_ino, offset, 0); + item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, + path, disk_bytenr, 0); if (IS_ERR(item)) { ret = PTR_ERR(item); if (ret == -ENOENT || ret == -EFBIG) @@ -208,7 +208,7 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, * this byte range must be able to fit inside * a single leaf so it will also fit inside a u32 */ - diff = offset - item_start_offset; + diff = disk_bytenr - item_start_offset; diff = diff / root->sectorsize; diff = diff * csum_size; @@ -216,7 +216,11 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, ((unsigned long)item) + diff, csum_size); 
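/*
 * Worked example of the csum offset math above (editorial; assumes a 4K
 * sectorsize and 4-byte crc32c checksums): if the item found starts at
 * disk bytenr 1048576 and the bio sector sits at bytenr 1114112, then
 * diff = (1114112 - 1048576) / 4096 * 4 = 16 * 4 = 64, so the desired
 * checksum lives 64 bytes into the item body.
 */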
found: - set_state_private(io_tree, offset, sum); + if (dst) + *dst++ = sum; + else + set_state_private(io_tree, offset, sum); + disk_bytenr += bvec->bv_len; bio_index++; bvec++; } @@ -224,75 +228,8 @@ found: return 0; } -int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, - u64 start, unsigned long len) -{ - struct btrfs_ordered_sum *sums; - struct btrfs_sector_sum *sector_sum; - struct btrfs_ordered_extent *ordered; - char *data; - struct page *page; - unsigned long total_bytes = 0; - unsigned long this_sum_bytes = 0; - - sums = kzalloc(btrfs_ordered_sum_size(root, len), GFP_NOFS); - if (!sums) - return -ENOMEM; - - sector_sum = sums->sums; - sums->file_offset = start; - sums->len = len; - INIT_LIST_HEAD(&sums->list); - ordered = btrfs_lookup_ordered_extent(inode, sums->file_offset); - BUG_ON(!ordered); - - while(len > 0) { - if (start >= ordered->file_offset + ordered->len || - start < ordered->file_offset) { - sums->len = this_sum_bytes; - this_sum_bytes = 0; - btrfs_add_ordered_sum(inode, ordered, sums); - btrfs_put_ordered_extent(ordered); - - sums = kzalloc(btrfs_ordered_sum_size(root, len), - GFP_NOFS); - BUG_ON(!sums); - sector_sum = sums->sums; - sums->len = len; - sums->file_offset = start; - ordered = btrfs_lookup_ordered_extent(inode, - sums->file_offset); - BUG_ON(!ordered); - } - - page = find_get_page(inode->i_mapping, - start >> PAGE_CACHE_SHIFT); - - data = kmap_atomic(page, KM_USER0); - sector_sum->sum = ~(u32)0; - sector_sum->sum = btrfs_csum_data(root, data, sector_sum->sum, - PAGE_CACHE_SIZE); - kunmap_atomic(data, KM_USER0); - btrfs_csum_final(sector_sum->sum, - (char *)§or_sum->sum); - sector_sum->offset = page_offset(page); - page_cache_release(page); - - sector_sum++; - total_bytes += PAGE_CACHE_SIZE; - this_sum_bytes += PAGE_CACHE_SIZE; - start += PAGE_CACHE_SIZE; - - WARN_ON(len < PAGE_CACHE_SIZE); - len -= PAGE_CACHE_SIZE; - } - btrfs_add_ordered_sum(inode, ordered, sums); - btrfs_put_ordered_extent(ordered); - return 0; -} - int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, - struct bio *bio) + struct bio *bio, u64 file_start, int contig) { struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; @@ -303,6 +240,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, unsigned long total_bytes = 0; unsigned long this_sum_bytes = 0; u64 offset; + u64 disk_bytenr; WARN_ON(bio->bi_vcnt <= 0); sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); @@ -310,16 +248,25 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, return -ENOMEM; sector_sum = sums->sums; - sums->file_offset = page_offset(bvec->bv_page) + bvec->bv_offset; + disk_bytenr = (u64)bio->bi_sector << 9; sums->len = bio->bi_size; INIT_LIST_HEAD(&sums->list); - ordered = btrfs_lookup_ordered_extent(inode, sums->file_offset); + + if (contig) + offset = file_start; + else + offset = page_offset(bvec->bv_page) + bvec->bv_offset; + + ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); + sums->bytenr = ordered->start; while(bio_index < bio->bi_vcnt) { - offset = page_offset(bvec->bv_page) + bvec->bv_offset; - if (offset >= ordered->file_offset + ordered->len || - offset < ordered->file_offset) { + if (!contig) + offset = page_offset(bvec->bv_page) + bvec->bv_offset; + + if (!contig && (offset >= ordered->file_offset + ordered->len || + offset < ordered->file_offset)) { unsigned long bytes_left; sums->len = this_sum_bytes; this_sum_bytes = 0; @@ -333,10 +280,9 @@ int 
btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, BUG_ON(!sums); sector_sum = sums->sums; sums->len = bytes_left; - sums->file_offset = offset; - ordered = btrfs_lookup_ordered_extent(inode, - sums->file_offset); + ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); + sums->bytenr = ordered->start; } data = kmap_atomic(bvec->bv_page, KM_USER0); @@ -348,13 +294,14 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, kunmap_atomic(data, KM_USER0); btrfs_csum_final(sector_sum->sum, (char *)§or_sum->sum); - sector_sum->offset = page_offset(bvec->bv_page) + - bvec->bv_offset; + sector_sum->bytenr = disk_bytenr; sector_sum++; bio_index++; total_bytes += bvec->bv_len; this_sum_bytes += bvec->bv_len; + disk_bytenr += bvec->bv_len; + offset += bvec->bv_len; bvec++; } this_sum_bytes = 0; @@ -364,11 +311,10 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, } int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_ordered_sum *sums) { - u64 objectid = inode->i_ino; - u64 offset; + u64 bytenr; int ret; struct btrfs_key file_key; struct btrfs_key found_key; @@ -396,13 +342,12 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, again: next_offset = (u64)-1; found_next = 0; - offset = sector_sum->offset; - file_key.objectid = objectid; - file_key.offset = offset; - btrfs_set_key_type(&file_key, BTRFS_CSUM_ITEM_KEY); + file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; + file_key.offset = sector_sum->bytenr; + bytenr = sector_sum->bytenr; + btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY); - mutex_lock(&BTRFS_I(inode)->csum_mutex); - item = btrfs_lookup_csum(trans, root, path, objectid, offset, 1); + item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1); if (!IS_ERR(item)) { leaf = path->nodes[0]; ret = 0; @@ -432,8 +377,8 @@ again: slot = 0; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); - if (found_key.objectid != objectid || - found_key.type != BTRFS_CSUM_ITEM_KEY) { + if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || + found_key.type != BTRFS_EXTENT_CSUM_KEY) { found_next = 1; goto insert; } @@ -460,10 +405,10 @@ again: path->slots[0]--; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - csum_offset = (offset - found_key.offset) >> + csum_offset = (bytenr - found_key.offset) >> root->fs_info->sb->s_blocksize_bits; - if (btrfs_key_type(&found_key) != BTRFS_CSUM_ITEM_KEY || - found_key.objectid != objectid || + if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY || + found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) { goto insert; } @@ -482,8 +427,18 @@ insert: btrfs_release_path(root, path); csum_offset = 0; if (found_next) { - u64 tmp = min((u64)i_size_read(inode), next_offset); - tmp -= offset & ~((u64)root->sectorsize -1); + u64 tmp = total_bytes + root->sectorsize; + u64 next_sector = sector_sum->bytenr; + struct btrfs_sector_sum *next = sector_sum + 1; + + while(tmp < sums->len) { + if (next_sector + root->sectorsize != next->bytenr) + break; + tmp += root->sectorsize; + next_sector = next->bytenr; + next++; + } + tmp = min(tmp, next_offset - file_key.offset); tmp >>= root->fs_info->sb->s_blocksize_bits; tmp = max((u64)1, tmp); tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size)); @@ -510,7 +465,6 @@ found: item_end = (struct btrfs_csum_item *)((unsigned char *)item_end + btrfs_item_size_nr(leaf, 
path->slots[0])); eb_token = NULL; - mutex_unlock(&BTRFS_I(inode)->csum_mutex); cond_resched(); next_sector: @@ -541,9 +495,9 @@ next_sector: if (total_bytes < sums->len) { item = (struct btrfs_csum_item *)((char *)item + csum_size); - if (item < item_end && offset + PAGE_CACHE_SIZE == - sector_sum->offset) { - offset = sector_sum->offset; + if (item < item_end && bytenr + PAGE_CACHE_SIZE == + sector_sum->bytenr) { + bytenr = sector_sum->bytenr; goto next_sector; } } @@ -562,7 +516,6 @@ out: return ret; fail_unlock: - mutex_unlock(&BTRFS_I(inode)->csum_mutex); goto out; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 09efc9473a3..c03d847b8c4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1221,7 +1221,7 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - ret = btrfs_csum_one_bio(root, inode, bio); + ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); BUG_ON(ret); return 0; } @@ -1259,12 +1259,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, btrfs_test_flag(inode, NODATASUM); if (!(rw & (1 << BIO_RW))) { - - if (bio_flags & EXTENT_BIO_COMPRESSED) + if (bio_flags & EXTENT_BIO_COMPRESSED) { return btrfs_submit_compressed_read(inode, bio, mirror_num, bio_flags); - else if (!skip_sum) - btrfs_lookup_bio_sums(root, inode, bio); + } else if (!skip_sum) + btrfs_lookup_bio_sums(root, inode, bio, NULL); goto mapit; } else if (!skip_sum) { /* we're doing a write, do the async checksumming */ @@ -1292,8 +1291,8 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, btrfs_set_trans_block_group(trans, inode); list_for_each(cur, list) { sum = list_entry(cur, struct btrfs_ordered_sum, list); - btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root, - inode, sum); + btrfs_csum_file_blocks(trans, + BTRFS_I(inode)->root->fs_info->csum_root, sum); } return 0; } @@ -1545,6 +1544,7 @@ struct io_failure_record { u64 start; u64 len; u64 logical; + unsigned long bio_flags; int last_mirror; }; @@ -1563,7 +1563,6 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, int ret; int rw; u64 logical; - unsigned long bio_flags = 0; ret = get_state_private(failure_tree, start, &private); if (ret) { @@ -1573,6 +1572,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, failrec->start = start; failrec->len = end - start + 1; failrec->last_mirror = 0; + failrec->bio_flags = 0; spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, failrec->len); @@ -1588,8 +1588,10 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, } logical = start - em->start; logical = em->block_start + logical; - if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) - bio_flags = EXTENT_BIO_COMPRESSED; + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { + logical = em->block_start; + failrec->bio_flags = EXTENT_BIO_COMPRESSED; + } failrec->logical = logical; free_extent_map(em); set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | @@ -1626,6 +1628,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, bio->bi_sector = failrec->logical >> 9; bio->bi_bdev = failed_bio->bi_bdev; bio->bi_size = 0; + bio_add_page(bio, page, failrec->len, start - page_offset(page)); if (failed_bio->bi_rw & (1 << BIO_RW)) rw = WRITE; @@ -1634,7 +1637,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, failrec->last_mirror, - bio_flags); + failrec->bio_flags); return 0; } @@ -1688,9 +1691,14 @@ static int 
btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, u32 csum = ~(u32)0; unsigned long flags; + if (PageChecked(page)) { + ClearPageChecked(page); + goto good; + } if (btrfs_test_opt(root, NODATASUM) || btrfs_test_flag(inode, NODATASUM)) return 0; + if (state && state->start == start) { private = state->private; ret = 0; @@ -1709,7 +1717,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, } kunmap_atomic(kaddr, KM_IRQ0); local_irq_restore(flags); - +good: /* if the io failure tree for this inode is non-empty, * check to see if we've recovered from a failed IO */ @@ -2243,6 +2251,7 @@ fail: return err; } +#if 0 /* * when truncating bytes in a file, it is possible to avoid reading * the leaves that contain only checksum items. This can be the @@ -2410,6 +2419,8 @@ out: return ret; } +#endif + /* * this can truncate away extent items, csum items and directory items. * It starts at a high offset and removes keys until it can't find @@ -2459,9 +2470,6 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, btrfs_init_path(path); - ret = drop_csum_leaves(trans, root, path, inode, new_size); - BUG_ON(ret); - search_again: ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) { @@ -2509,16 +2517,11 @@ search_again: } item_end--; } - if (found_type == BTRFS_CSUM_ITEM_KEY) { - ret = btrfs_csum_truncate(trans, root, path, - new_size); - BUG_ON(ret); - } if (item_end < new_size) { if (found_type == BTRFS_DIR_ITEM_KEY) { found_type = BTRFS_INODE_ITEM_KEY; } else if (found_type == BTRFS_EXTENT_ITEM_KEY) { - found_type = BTRFS_CSUM_ITEM_KEY; + found_type = BTRFS_EXTENT_DATA_KEY; } else if (found_type == BTRFS_EXTENT_DATA_KEY) { found_type = BTRFS_XATTR_ITEM_KEY; } else if (found_type == BTRFS_XATTR_ITEM_KEY) { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b4da53d55c8..6228b69c2b9 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -714,8 +714,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, u64 len = olen; u64 bs = root->fs_info->sb->s_blocksize; u64 hint_byte; - u16 csum_size = - btrfs_super_csum_size(&root->fs_info->super_copy); + /* * TODO: * - split compressed inline extents. 
annoying: we need to @@ -833,7 +832,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); - if (btrfs_key_type(&key) > BTRFS_CSUM_ITEM_KEY || + if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || key.objectid != src->i_ino) break; @@ -958,56 +957,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_mark_buffer_dirty(leaf); } - if (btrfs_key_type(&key) == BTRFS_CSUM_ITEM_KEY) { - u32 size; - struct btrfs_key new_key; - u64 coverslen; - int coff, clen; - - size = btrfs_item_size_nr(leaf, slot); - coverslen = (size / csum_size) << - root->fs_info->sb->s_blocksize_bits; - printk("csums for %llu~%llu\n", - key.offset, coverslen); - if (key.offset + coverslen < off || - key.offset >= off+len) - goto next; - - read_extent_buffer(leaf, buf, - btrfs_item_ptr_offset(leaf, slot), - size); - btrfs_release_path(root, path); - - coff = 0; - if (off > key.offset) - coff = ((off - key.offset) >> - root->fs_info->sb->s_blocksize_bits) * - csum_size; - clen = size - coff; - if (key.offset + coverslen > off+len) - clen -= ((key.offset+coverslen-off-len) >> - root->fs_info->sb->s_blocksize_bits) * - csum_size; - printk(" will dup %d~%d of %d\n", - coff, clen, size); - - memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = inode->i_ino; - new_key.offset = key.offset + destoff - off; - - ret = btrfs_insert_empty_item(trans, root, path, - &new_key, clen); - if (ret) - goto out; - - leaf = path->nodes[0]; - slot = path->slots[0]; - write_extent_buffer(leaf, buf + coff, - btrfs_item_ptr_offset(leaf, slot), - clen); - btrfs_mark_buffer_dirty(leaf); - } - next: btrfs_release_path(root, path); key.offset++; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 027ad6b3839..d9e232227da 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -610,7 +610,8 @@ out: * try to find a checksum. This is used because we allow pages to * be reclaimed before their checksum is actually put into the btree */ -int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) +int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, + u32 *sum) { struct btrfs_ordered_sum *ordered_sum; struct btrfs_sector_sum *sector_sums; @@ -629,11 +630,11 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) mutex_lock(&tree->mutex); list_for_each_prev(cur, &ordered->list) { ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list); - if (offset >= ordered_sum->file_offset) { + if (disk_bytenr >= ordered_sum->bytenr) { num_sectors = ordered_sum->len / sectorsize; sector_sums = ordered_sum->sums; for (i = 0; i < num_sectors; i++) { - if (sector_sums[i].offset == offset) { + if (sector_sums[i].bytenr == disk_bytenr) { *sum = sector_sums[i].sum; ret = 0; goto out; diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 260bf95dfe0..ab66d5e8d6d 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -33,15 +33,17 @@ struct btrfs_ordered_inode_tree { * the ordered extent are on disk */ struct btrfs_sector_sum { - u64 offset; + /* bytenr on disk */ + u64 bytenr; u32 sum; }; struct btrfs_ordered_sum { - u64 file_offset; + /* bytenr is the start of this extent on disk */ + u64 bytenr; + /* * this is the length in bytes covered by the sums array below. - * But, the sums array may not be contiguous in the file. 
*/ unsigned long len; struct list_head list; @@ -147,7 +149,7 @@ struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); int btrfs_ordered_update_i_size(struct inode *inode, struct btrfs_ordered_extent *ordered); -int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum); +int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); int btrfs_wait_on_page_writeback_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c766649ad45..08469ec0585 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -934,24 +934,17 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans, unsigned long file_bytes; struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; - struct inode *inode; unsigned long ptr; file_bytes = (item_size / csum_size) * root->sectorsize; - inode = read_one_inode(root, key->objectid); - if (!inode) { - return -EIO; - } - sums = kzalloc(btrfs_ordered_sum_size(root, file_bytes), GFP_NOFS); if (!sums) { - iput(inode); return -ENOMEM; } INIT_LIST_HEAD(&sums->list); sums->len = file_bytes; - sums->file_offset = key->offset; + sums->bytenr = key->offset; /* * copy all the sums into the ordered sum struct @@ -960,7 +953,7 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans, cur_offset = key->offset; ptr = btrfs_item_ptr_offset(eb, slot); while(item_size > 0) { - sector_sum->offset = cur_offset; + sector_sum->bytenr = cur_offset; read_extent_buffer(eb, §or_sum->sum, ptr, csum_size); sector_sum++; item_size -= csum_size; @@ -969,11 +962,9 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans, } /* let btrfs_csum_file_blocks add them into the file */ - ret = btrfs_csum_file_blocks(trans, root, inode, sums); + ret = btrfs_csum_file_blocks(trans, root->fs_info->csum_root, sums); BUG_ON(ret); kfree(sums); - iput(inode); - return 0; } /* @@ -1670,7 +1661,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, ret = replay_one_extent(wc->trans, root, path, eb, i, &key); BUG_ON(ret); - } else if (key.type == BTRFS_CSUM_ITEM_KEY) { + } else if (key.type == BTRFS_EXTENT_CSUM_KEY) { ret = replay_one_csum(wc->trans, root, path, eb, i, &key); BUG_ON(ret); @@ -2466,6 +2457,85 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, return 0; } +static noinline int copy_extent_csums(struct btrfs_trans_handle *trans, + struct list_head *list, + struct btrfs_root *root, + u64 disk_bytenr, u64 len) +{ + struct btrfs_ordered_sum *sums; + struct btrfs_sector_sum *sector_sum; + int ret; + struct btrfs_path *path; + struct btrfs_csum_item *item = NULL; + u64 end = disk_bytenr + len; + u64 item_start_offset = 0; + u64 item_last_offset = 0; + u32 diff; + u32 sum; + u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy); + + sums = kzalloc(btrfs_ordered_sum_size(root, len), GFP_NOFS); + + sector_sum = sums->sums; + sums->bytenr = disk_bytenr; + sums->len = len; + list_add_tail(&sums->list, list); + + path = btrfs_alloc_path(); + while(disk_bytenr < end) { + if (!item || disk_bytenr < item_start_offset || + disk_bytenr >= item_last_offset) { + struct btrfs_key found_key; + u32 item_size; + + if (item) + btrfs_release_path(root, path); + item = btrfs_lookup_csum(NULL, root, path, + disk_bytenr, 0); + if (IS_ERR(item)) { + ret = PTR_ERR(item); + if (ret == -ENOENT || 
ret == -EFBIG) + ret = 0; + sum = 0; + printk("log no csum found for byte %llu\n", + (unsigned long long)disk_bytenr); + item = NULL; + btrfs_release_path(root, path); + goto found; + } + btrfs_item_key_to_cpu(path->nodes[0], &found_key, + path->slots[0]); + + item_start_offset = found_key.offset; + item_size = btrfs_item_size_nr(path->nodes[0], + path->slots[0]); + item_last_offset = item_start_offset + + (item_size / csum_size) * + root->sectorsize; + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_csum_item); + } + /* + * this byte range must be able to fit inside + * a single leaf so it will also fit inside a u32 + */ + diff = disk_bytenr - item_start_offset; + diff = diff / root->sectorsize; + diff = diff * csum_size; + + read_extent_buffer(path->nodes[0], &sum, + ((unsigned long)item) + diff, + csum_size); +found: + sector_sum->bytenr = disk_bytenr; + sector_sum->sum = sum; + disk_bytenr += root->sectorsize; + sector_sum++; + } + btrfs_free_path(path); + return 0; +} + static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *dst_path, @@ -2481,6 +2551,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, u32 *ins_sizes; char *ins_data; int i; + struct list_head ordered_sums; + + INIT_LIST_HEAD(&ordered_sums); ins_data = kmalloc(nr * sizeof(struct btrfs_key) + nr * sizeof(u32), GFP_NOFS); @@ -2535,6 +2608,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, extent); u64 dl = btrfs_file_extent_disk_num_bytes(src, extent); + u64 cs = btrfs_file_extent_offset(src, extent); + u64 cl = btrfs_file_extent_num_bytes(src, + extent);; /* ds == 0 is a hole */ if (ds != 0) { ret = btrfs_inc_extent_ref(trans, log, @@ -2544,6 +2620,11 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, trans->transid, ins_keys[i].objectid); BUG_ON(ret); + ret = copy_extent_csums(trans, + &ordered_sums, + log->fs_info->csum_root, + ds + cs, cl); + BUG_ON(ret); } } } @@ -2553,6 +2634,20 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(dst_path->nodes[0]); btrfs_release_path(log, dst_path); kfree(ins_data); + + /* + * we have to do this after the loop above to avoid changing the + * log tree while trying to change the log tree. + */ + while(!list_empty(&ordered_sums)) { + struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, + struct btrfs_ordered_sum, + list); + ret = btrfs_csum_file_blocks(trans, log, sums); + BUG_ON(ret); + list_del(&sums->list); + kfree(sums); + } return 0; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6c523b3360f..2049d179ccd 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2771,6 +2771,7 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root, device->work.func = pending_bios_fn; fs_devices->num_devices++; spin_lock_init(&device->io_lock); + INIT_LIST_HEAD(&device->dev_alloc_list); memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); return device; } -- cgit v1.2.3-70-g09d2 From d397712bcc6a759a560fd247e6053ecae091f958 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 5 Jan 2009 21:25:51 -0500 Subject: Btrfs: Fix checkpatch.pl warnings There were many, most are fixed now. struct-funcs.c generates some warnings but these are bogus. 
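To make the classes of warnings concrete, here is an illustrative before/after pair (an editorial sketch with made-up names like pending, process_one, transid, running and value; these lines are not quoted from the hunks below): checkpatch wants a space after keywords such as while, a printk() with an explicit KERN_ level and the standard %llu plus a cast instead of the non-standard %Lu, and no NULL guard in front of kfree(), since kfree(NULL) is a no-op.

	/* before: patterns checkpatch flags */
	while(!list_empty(&pending))
		process_one(&pending);
	printk("trans %Lu running %Lu\n", transid, running);
	if (value)
		kfree(value);

	/* after: the style these hunks converge on */
	while (!list_empty(&pending))
		process_one(&pending);
	printk(KERN_CRIT "trans %llu running %llu\n",
	       (unsigned long long)transid,
	       (unsigned long long)running);
	kfree(value);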
Signed-off-by: Chris Mason --- fs/btrfs/acl.c | 5 +- fs/btrfs/async-thread.c | 6 +- fs/btrfs/compat.h | 4 +- fs/btrfs/compression.c | 12 +-- fs/btrfs/ctree.c | 217 ++++++++++++++++-------------------------- fs/btrfs/ctree.h | 30 +++--- fs/btrfs/dir-item.c | 2 +- fs/btrfs/disk-io.c | 187 +++++++++++++------------------------ fs/btrfs/export.c | 8 +- fs/btrfs/extent-tree.c | 223 +++++++++++++++++++++++--------------------- fs/btrfs/extent_io.c | 213 ++++++++++++++++++++---------------------- fs/btrfs/extent_map.c | 14 +-- fs/btrfs/file-item.c | 18 ++-- fs/btrfs/file.c | 49 +++++----- fs/btrfs/free-space-cache.c | 37 ++++---- fs/btrfs/inode-map.c | 1 - fs/btrfs/inode.c | 173 +++++++++++++++++----------------- fs/btrfs/ioctl.c | 37 ++++---- fs/btrfs/locking.c | 5 +- fs/btrfs/ordered-data.c | 34 +++---- fs/btrfs/print-tree.c | 73 +++++++++------ fs/btrfs/ref-cache.c | 12 +-- fs/btrfs/root-tree.c | 17 ++-- fs/btrfs/struct-funcs.c | 4 +- fs/btrfs/super.c | 25 +++-- fs/btrfs/sysfs.c | 6 +- fs/btrfs/transaction.c | 45 ++++----- fs/btrfs/transaction.h | 6 +- fs/btrfs/tree-defrag.c | 9 +- fs/btrfs/tree-log.c | 70 +++++++------- fs/btrfs/volumes.c | 78 ++++++++-------- fs/btrfs/xattr.c | 3 +- fs/btrfs/zlib.c | 45 ++++----- 33 files changed, 770 insertions(+), 898 deletions(-) (limited to 'fs/btrfs/ordered-data.c') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 867eaf1f8ef..1d53b62dbba 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -161,8 +161,7 @@ static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) ret = __btrfs_setxattr(inode, name, value, size, 0); out: - if (value) - kfree(value); + kfree(value); if (!ret) btrfs_update_cached_acl(inode, p_acl, acl); @@ -213,7 +212,7 @@ static int btrfs_xattr_acl_default_get(struct inode *inode, const char *name, } static int btrfs_xattr_acl_default_set(struct inode *inode, const char *name, - const void *value, size_t size, int flags) + const void *value, size_t size, int flags) { return btrfs_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size); } diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 4229450b759..8e2fec05dbe 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -104,7 +104,7 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers, spin_lock_irqsave(&workers->lock, flags); - while(!list_empty(&workers->order_list)) { + while (!list_empty(&workers->order_list)) { work = list_entry(workers->order_list.next, struct btrfs_work, order_list); @@ -143,7 +143,7 @@ static int worker_loop(void *arg) struct btrfs_work *work; do { spin_lock_irq(&worker->lock); - while(!list_empty(&worker->pending)) { + while (!list_empty(&worker->pending)) { cur = worker->pending.next; work = list_entry(cur, struct btrfs_work, list); list_del(&work->list); @@ -188,7 +188,7 @@ int btrfs_stop_workers(struct btrfs_workers *workers) struct btrfs_worker_thread *worker; list_splice_init(&workers->idle_list, &workers->worker_list); - while(!list_empty(&workers->worker_list)) { + while (!list_empty(&workers->worker_list)) { cur = workers->worker_list.next; worker = list_entry(cur, struct btrfs_worker_thread, worker_list); diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h index 75e4426d6fb..594d60bdd3c 100644 --- a/fs/btrfs/compat.h +++ b/fs/btrfs/compat.h @@ -4,7 +4,7 @@ #define btrfs_drop_nlink(inode) drop_nlink(inode) #define btrfs_inc_nlink(inode) inc_nlink(inode) -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27) +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27) static inline struct dentry 
*d_obtain_alias(struct inode *inode) { struct dentry *d; @@ -21,7 +21,7 @@ static inline struct dentry *d_obtain_alias(struct inode *inode) } #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) # define __pagevec_lru_add_file __pagevec_lru_add # define open_bdev_exclusive open_bdev_excl # define close_bdev_exclusive(bdev, mode) close_bdev_excl(bdev) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 2436163d543..ee848d8585d 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -137,7 +137,8 @@ static int check_compressed_csum(struct inode *inode, kunmap_atomic(kaddr, KM_USER0); if (csum != *cb_sum) { - printk("btrfs csum failed ino %lu extent %llu csum %u " + printk(KERN_INFO "btrfs csum failed ino %lu " + "extent %llu csum %u " "wanted %u mirror %d\n", inode->i_ino, (unsigned long long)disk_start, csum, *cb_sum, cb->mirror_num); @@ -217,7 +218,7 @@ csum_failed: * we have verified the checksum already, set page * checked so the end_io handlers know about it */ - while(bio_index < cb->orig_bio->bi_vcnt) { + while (bio_index < cb->orig_bio->bi_vcnt) { SetPageChecked(bvec->bv_page); bvec++; bio_index++; @@ -246,7 +247,7 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start, int i; int ret; - while(nr_pages > 0) { + while (nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, min_t(unsigned long, nr_pages, ARRAY_SIZE(pages)), pages); @@ -463,7 +464,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; pagevec_init(&pvec, 0); - while(last_offset < compressed_end) { + while (last_offset < compressed_end) { page_index = last_offset >> PAGE_CACHE_SHIFT; if (page_index > end_index) @@ -697,9 +698,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); BUG_ON(ret); - if (!btrfs_test_flag(inode, NODATASUM)) { + if (!btrfs_test_flag(inode, NODATASUM)) btrfs_lookup_bio_sums(root, inode, comp_bio, sums); - } ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); BUG_ON(ret); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 7fad2e3ad6f..9e46c077681 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -67,7 +67,7 @@ void btrfs_free_path(struct btrfs_path *p) * * It is safe to call this on paths that no locks or extent buffers held. */ -void noinline btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) +noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) { int i; @@ -112,7 +112,7 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; - while(1) { + while (1) { eb = btrfs_root_node(root); btrfs_tree_lock(eb); @@ -202,22 +202,22 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, } /* - * does the dirty work in cow of a single block. The parent block - * (if supplied) is updated to point to the new cow copy. The new - * buffer is marked dirty and returned locked. If you modify the block - * it needs to be marked dirty again. + * does the dirty work in cow of a single block. The parent block (if + * supplied) is updated to point to the new cow copy. The new buffer is marked + * dirty and returned locked. If you modify the block it needs to be marked + * dirty again. * * search_start -- an allocation hint for the new block * - * empty_size -- a hint that you plan on doing more cow. 
This is the size in bytes - * the allocator should try to find free next to the block it returns. This is - * just a hint and may be ignored by the allocator. + * empty_size -- a hint that you plan on doing more cow. This is the size in + * bytes the allocator should try to find free next to the block it returns. + * This is just a hint and may be ignored by the allocator. * * prealloc_dest -- if you have already reserved a destination for the cow, - * this uses that block instead of allocating a new one. btrfs_alloc_reserved_extent - * is used to finish the allocation. + * this uses that block instead of allocating a new one. + * btrfs_alloc_reserved_extent is used to finish the allocation. */ -static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, +static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, @@ -366,7 +366,7 @@ static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, * This version of it has extra checks so that a block isn't cow'd more than * once per transaction, as long as it hasn't been written yet */ -int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, +noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, struct extent_buffer **cow_ret, u64 prealloc_dest) @@ -375,13 +375,16 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, int ret; if (trans->transaction != root->fs_info->running_transaction) { - printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid, + printk(KERN_CRIT "trans %llu running %llu\n", + (unsigned long long)trans->transid, + (unsigned long long) root->fs_info->running_transaction->transid); WARN_ON(1); } if (trans->transid != root->fs_info->generation) { - printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid, - root->fs_info->generation); + printk(KERN_CRIT "trans %llu running %llu\n", + (unsigned long long)trans->transid, + (unsigned long long)root->fs_info->generation); WARN_ON(1); } @@ -489,16 +492,10 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, if (cache_only && parent_level != 1) return 0; - if (trans->transaction != root->fs_info->running_transaction) { - printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid, - root->fs_info->running_transaction->transid); + if (trans->transaction != root->fs_info->running_transaction) WARN_ON(1); - } - if (trans->transid != root->fs_info->generation) { - printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid, - root->fs_info->generation); + if (trans->transid != root->fs_info->generation) WARN_ON(1); - } parent_nritems = btrfs_header_nritems(parent); blocksize = btrfs_level_size(root, parent_level - 1); @@ -681,51 +678,18 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, BUG_ON(btrfs_node_blockptr(parent, parent_slot) != btrfs_header_bytenr(leaf)); } -#if 0 - for (i = 0; nritems > 1 && i < nritems - 2; i++) { - btrfs_item_key_to_cpu(leaf, &cpukey, i + 1); - btrfs_item_key(leaf, &leaf_key, i); - if (comp_keys(&leaf_key, &cpukey) >= 0) { - btrfs_print_leaf(root, leaf); - printk("slot %d offset bad key\n", i); - BUG_ON(1); - } - if (btrfs_item_offset_nr(leaf, i) != - btrfs_item_end_nr(leaf, i + 1)) { - btrfs_print_leaf(root, leaf); - printk("slot %d offset bad\n", i); - BUG_ON(1); - } - if (i == 0) { - if (btrfs_item_offset_nr(leaf, i) + - btrfs_item_size_nr(leaf, i) != - 
BTRFS_LEAF_DATA_SIZE(root)) { - btrfs_print_leaf(root, leaf); - printk("slot %d first offset bad\n", i); - BUG_ON(1); - } - } - } - if (nritems > 0) { - if (btrfs_item_size_nr(leaf, nritems - 1) > 4096) { - btrfs_print_leaf(root, leaf); - printk("slot %d bad size \n", nritems - 1); - BUG_ON(1); - } - } -#endif if (slot != 0 && slot < nritems - 1) { btrfs_item_key(leaf, &leaf_key, slot); btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1); if (comp_keys(&leaf_key, &cpukey) <= 0) { btrfs_print_leaf(root, leaf); - printk("slot %d offset bad key\n", slot); + printk(KERN_CRIT "slot %d offset bad key\n", slot); BUG_ON(1); } if (btrfs_item_offset_nr(leaf, slot - 1) != btrfs_item_end_nr(leaf, slot)) { btrfs_print_leaf(root, leaf); - printk("slot %d offset bad\n", slot); + printk(KERN_CRIT "slot %d offset bad\n", slot); BUG_ON(1); } } @@ -736,7 +700,7 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, if (btrfs_item_offset_nr(leaf, slot) != btrfs_item_end_nr(leaf, slot + 1)) { btrfs_print_leaf(root, leaf); - printk("slot %d offset bad\n", slot); + printk(KERN_CRIT "slot %d offset bad\n", slot); BUG_ON(1); } } @@ -745,30 +709,10 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, return 0; } -static int noinline check_block(struct btrfs_root *root, +static noinline int check_block(struct btrfs_root *root, struct btrfs_path *path, int level) { - u64 found_start; return 0; - if (btrfs_header_level(path->nodes[level]) != level) - printk("warning: bad level %Lu wanted %d found %d\n", - path->nodes[level]->start, level, - btrfs_header_level(path->nodes[level])); - found_start = btrfs_header_bytenr(path->nodes[level]); - if (found_start != path->nodes[level]->start) { - printk("warning: bad bytentr %Lu found %Lu\n", - path->nodes[level]->start, found_start); - } -#if 0 - struct extent_buffer *buf = path->nodes[level]; - - if (memcmp_extent_buffer(buf, root->fs_info->fsid, - (unsigned long)btrfs_header_fsid(buf), - BTRFS_FSID_SIZE)) { - printk("warning bad block %Lu\n", buf->start); - return 1; - } -#endif if (level == 0) return check_leaf(root, path, level); return check_node(root, path, level); @@ -802,7 +746,7 @@ static noinline int generic_bin_search(struct extent_buffer *eb, unsigned long map_len = 0; int err; - while(low < high) { + while (low < high) { mid = (low + high) / 2; offset = p + mid * item_size; @@ -1130,7 +1074,7 @@ enospc: * when they are completely full. This is also done top down, so we * have to be pessimistic. */ -static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, +static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) { @@ -1296,7 +1240,7 @@ static noinline void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; - while(1) { + while (1) { if (direction < 0) { if (nr == 0) break; @@ -1322,7 +1266,8 @@ static noinline void reada_for_search(struct btrfs_root *root, nscan++; if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32)) break; - if(nread > (256 * 1024) || nscan > 128) + + if (nread > (256 * 1024) || nscan > 128) break; if (search < lowest_read) @@ -1333,17 +1278,17 @@ static noinline void reada_for_search(struct btrfs_root *root, } /* - * when we walk down the tree, it is usually safe to unlock the higher layers in - * the tree. The exceptions are when our path goes through slot 0, because operations - * on the tree might require changing key pointers higher up in the tree. 
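
The comment being rewrapped here describes lock coupling on the way down the tree: an ancestor's lock can be dropped once the walk can no longer modify it, with slot 0 as the exception because key pointers above may still need updating. A minimal userspace sketch of the idea, assuming a toy node type with a per-node mutex (this is not the btrfs locking code; the caller is assumed to hold the parent's lock on entry):

#include <pthread.h>

struct toy_node {
	pthread_mutex_t lock;
	struct toy_node *child[16];
	int nritems;
};

/* take the child's lock before letting go of the parent's */
static struct toy_node *descend(struct toy_node *parent, int slot)
{
	struct toy_node *child = parent->child[slot];

	pthread_mutex_lock(&child->lock);
	/* keep the parent locked through slot 0, where a key
	 * update higher in the tree may still be required */
	if (slot != 0)
		pthread_mutex_unlock(&parent->lock);
	return child;
}

int main(void)
{
	struct toy_node parent = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct toy_node child = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct toy_node *c;

	parent.child[1] = &child;
	parent.nritems = 2;

	pthread_mutex_lock(&parent.lock);
	c = descend(&parent, 1);	/* parent is unlocked in here */
	pthread_mutex_unlock(&c->lock);
	return 0;
}
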
+ * when we walk down the tree, it is usually safe to unlock the higher layers + * in the tree. The exceptions are when our path goes through slot 0, because + * operations on the tree might require changing key pointers higher up in the + * tree. * - * callers might also have set path->keep_locks, which tells this code to - * keep the lock if the path points to the last slot in the block. This is - * part of walking through the tree, and selecting the next slot in the higher - * block. + * callers might also have set path->keep_locks, which tells this code to keep + * the lock if the path points to the last slot in the block. This is part of + * walking through the tree, and selecting the next slot in the higher block. * - * lowest_unlock sets the lowest level in the tree we're allowed to unlock. - * so if lowest_unlock is 1, level 0 won't be unlocked + * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so + * if lowest_unlock is 1, level 0 won't be unlocked */ static noinline void unlock_up(struct btrfs_path *path, int level, int lowest_unlock) @@ -1832,9 +1777,8 @@ static int push_node_left(struct btrfs_trans_handle *trans, if (!empty && src_nritems <= 8) return 1; - if (push_items <= 0) { + if (push_items <= 0) return 1; - } if (empty) { push_items = min(src_nritems, push_items); @@ -1854,7 +1798,7 @@ static int push_node_left(struct btrfs_trans_handle *trans, copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(dst_nritems), btrfs_node_key_ptr_offset(0), - push_items * sizeof(struct btrfs_key_ptr)); + push_items * sizeof(struct btrfs_key_ptr)); if (push_items < src_nritems) { memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), @@ -1899,19 +1843,16 @@ static int balance_node_right(struct btrfs_trans_handle *trans, src_nritems = btrfs_header_nritems(src); dst_nritems = btrfs_header_nritems(dst); push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; - if (push_items <= 0) { + if (push_items <= 0) return 1; - } - if (src_nritems < 4) { + if (src_nritems < 4) return 1; - } max_push = src_nritems / 2 + 1; /* don't try to empty the node */ - if (max_push >= src_nritems) { + if (max_push >= src_nritems) return 1; - } if (max_push < push_items) push_items = max_push; @@ -1924,7 +1865,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans, copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(0), btrfs_node_key_ptr_offset(src_nritems - push_items), - push_items * sizeof(struct btrfs_key_ptr)); + push_items * sizeof(struct btrfs_key_ptr)); btrfs_set_header_nritems(src, src_nritems - push_items); btrfs_set_header_nritems(dst, dst_nritems + push_items); @@ -1945,7 +1886,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans, * * returns zero on success or < 0 on failure. */ -static int noinline insert_new_root(struct btrfs_trans_handle *trans, +static noinline int insert_new_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) { @@ -2176,14 +2117,15 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr) * the start of the leaf data. 
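
push_node_left() and balance_node_right(), a few hunks above, are at heart array rotations over fixed-size key-pointer slots: copy a run of entries to the neighbour buffer, then close the gap with memmove. A standalone sketch of that shape over plain integers (sizes and names invented for illustration):

#include <stdio.h>
#include <string.h>

/* move the first 'push' entries of src onto the end of dst,
 * then shift the remainder of src down to slot 0 */
static void push_left(int *dst, int *dst_nr, int *src, int *src_nr, int push)
{
	memcpy(dst + *dst_nr, src, push * sizeof(*src));
	memmove(src, src + push, (*src_nr - push) * sizeof(*src));
	*dst_nr += push;
	*src_nr -= push;
}

int main(void)
{
	int dst[8] = { 1, 2 }, src[8] = { 3, 4, 5, 6 };
	int dst_nr = 2, src_nr = 4;

	push_left(dst, &dst_nr, src, &src_nr, 2);
	for (int i = 0; i < dst_nr; i++)
		printf("%d ", dst[i]);	/* prints: 1 2 3 4 */
	printf("| ");
	for (int i = 0; i < src_nr; i++)
		printf("%d ", src[i]);	/* prints: 5 6 */
	printf("\n");
	return 0;
}
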
IOW, how much room * the leaf has left for both items and data */ -int noinline btrfs_leaf_free_space(struct btrfs_root *root, +noinline int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf) { int nritems = btrfs_header_nritems(leaf); int ret; ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); if (ret < 0) { - printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n", + printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, " + "used %d nritems %d\n", ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root), leaf_space_used(leaf, 0, nritems), nritems); } @@ -2219,9 +2161,9 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root int ret; slot = path->slots[1]; - if (!path->nodes[1]) { + if (!path->nodes[1]) return 1; - } + upper = path->nodes[1]; if (slot >= btrfs_header_nritems(upper) - 1) return 1; @@ -2418,9 +2360,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root return 1; right_nritems = btrfs_header_nritems(right); - if (right_nritems == 0) { + if (right_nritems == 0) return 1; - } WARN_ON(!btrfs_tree_locked(path->nodes[1])); @@ -2502,7 +2443,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root push_items * sizeof(struct btrfs_item)); push_space = BTRFS_LEAF_DATA_SIZE(root) - - btrfs_item_offset_nr(right, push_items -1); + btrfs_item_offset_nr(right, push_items - 1); copy_extent_buffer(left, right, btrfs_leaf_data(left) + leaf_data_end(root, left) - push_space, @@ -2537,7 +2478,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root /* fixup right node */ if (push_items > right_nritems) { - printk("push items %d nr %u\n", push_items, right_nritems); + printk(KERN_CRIT "push items %d nr %u\n", push_items, + right_nritems); WARN_ON(1); } @@ -2640,9 +2582,8 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, /* first try to make some room by pushing left and right */ if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) { wret = push_leaf_right(trans, root, path, data_size, 0); - if (wret < 0) { + if (wret < 0) return wret; - } if (wret) { wret = push_leaf_left(trans, root, path, data_size, 0); if (wret < 0) @@ -2665,7 +2606,7 @@ again: l = path->nodes[0]; slot = path->slots[0]; nritems = btrfs_header_nritems(l); - mid = (nritems + 1)/ 2; + mid = (nritems + 1) / 2; right = btrfs_alloc_free_block(trans, root, root->leafsize, path->nodes[1]->start, @@ -2734,7 +2675,7 @@ again: path->slots[0] = 0; if (path->slots[1] == 0) { wret = fixup_low_keys(trans, root, - path, &disk_key, 1); + path, &disk_key, 1); if (wret) ret = wret; } @@ -3033,8 +2974,8 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, BTRFS_FILE_EXTENT_INLINE) { ptr = btrfs_item_ptr_offset(leaf, slot); memmove_extent_buffer(leaf, ptr, - (unsigned long)fi, - offsetof(struct btrfs_file_extent_item, + (unsigned long)fi, + offsetof(struct btrfs_file_extent_item, disk_bytenr)); } } @@ -3096,7 +3037,8 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, BUG_ON(slot < 0); if (slot >= nritems) { btrfs_print_leaf(root, leaf); - printk("slot %d too large, nritems %d\n", slot, nritems); + printk(KERN_CRIT "slot %d too large, nritems %d\n", + slot, nritems); BUG_ON(1); } @@ -3218,7 +3160,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans, if (old_data < data_end) { btrfs_print_leaf(root, leaf); - printk("slot %d old_data %d data_end %d\n", + printk(KERN_CRIT "slot %d old_data %d data_end %d\n", slot, old_data, data_end); 
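
btrfs_leaf_free_space(), whose diagnostics are cleaned up above, reduces to fixed arithmetic: item headers grow from the front of the leaf's data area, item payloads grow from the back, so free = data_size - headers - payload. A toy calculation (all sizes invented purely for illustration, not btrfs's real layout constants):

#include <stdio.h>

#define LEAF_DATA_SIZE	3995	/* leafsize minus header, illustrative */
#define ITEM_HEADER	25	/* per-item bookkeeping, illustrative */

int main(void)
{
	int nritems = 42;
	int data_bytes = 1900;	/* payload stored at the leaf's tail */
	int used = nritems * ITEM_HEADER + data_bytes;

	printf("free space: %d\n", LEAF_DATA_SIZE - used);
	return 0;
}
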
BUG_ON(1); } @@ -3317,9 +3259,8 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, unsigned int data_end; struct btrfs_disk_key disk_key; - for (i = 0; i < nr; i++) { + for (i = 0; i < nr; i++) total_data += data_size[i]; - } total_size = total_data + (nr * sizeof(struct btrfs_item)); ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); @@ -3336,7 +3277,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, if (btrfs_leaf_free_space(root, leaf) < total_size) { btrfs_print_leaf(root, leaf); - printk("not enough freespace need %u have %d\n", + printk(KERN_CRIT "not enough freespace need %u have %d\n", total_size, btrfs_leaf_free_space(root, leaf)); BUG(); } @@ -3349,7 +3290,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, if (old_data < data_end) { btrfs_print_leaf(root, leaf); - printk("slot %d old_data %d data_end %d\n", + printk(KERN_CRIT "slot %d old_data %d data_end %d\n", slot, old_data, data_end); BUG_ON(1); } @@ -3457,7 +3398,7 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, int wret; nritems = btrfs_header_nritems(parent); - if (slot != nritems -1) { + if (slot != nritems - 1) { memmove_extent_buffer(parent, btrfs_node_key_ptr_offset(slot), btrfs_node_key_ptr_offset(slot + 1), @@ -3614,7 +3555,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (btrfs_header_nritems(leaf) == 0) { path->slots[1] = slot; - ret = btrfs_del_leaf(trans, root, path, leaf->start); + ret = btrfs_del_leaf(trans, root, path, + leaf->start); BUG_ON(ret); free_extent_buffer(leaf); } else { @@ -3717,7 +3659,7 @@ again: ret = 1; goto out; } - while(1) { + while (1) { nritems = btrfs_header_nritems(cur); level = btrfs_header_level(cur); sret = bin_search(cur, min_key, level, &slot); @@ -3738,7 +3680,7 @@ again: * min_trans parameters. If it isn't in cache or is too * old, skip to the next one. 
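 *
 * The scan below is a plain filter: walk the slots in order and skip
 * any pointer whose generation predates min_trans, roughly
 *
 *	while (slot < nritems) {
 *		if (gen_of(slot) < min_trans) {
 *			slot++;
 *			continue;
 *		}
 *		break;
 *	}
 *
 * (gen_of is an illustrative name here, not a btrfs helper.)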
*/ - while(slot < nritems) { + while (slot < nritems) { u64 blockptr; u64 gen; struct extent_buffer *tmp; @@ -3830,7 +3772,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *c; WARN_ON(!path->keep_locks); - while(level < BTRFS_MAX_LEVEL) { + while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) return 1; @@ -3839,9 +3781,8 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, next: if (slot >= btrfs_header_nritems(c)) { level++; - if (level == BTRFS_MAX_LEVEL) { + if (level == BTRFS_MAX_LEVEL) return 1; - } continue; } if (level == 0) @@ -3889,9 +3830,8 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) int ret; nritems = btrfs_header_nritems(path->nodes[0]); - if (nritems == 0) { + if (nritems == 0) return 1; - } btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); @@ -3915,7 +3855,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) goto done; } - while(level < BTRFS_MAX_LEVEL) { + while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) return 1; @@ -3923,9 +3863,8 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) c = path->nodes[level]; if (slot >= btrfs_header_nritems(c)) { level++; - if (level == BTRFS_MAX_LEVEL) { + if (level == BTRFS_MAX_LEVEL) return 1; - } continue; } @@ -3946,7 +3885,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) break; } path->slots[level] = slot; - while(1) { + while (1) { level--; c = path->nodes[level]; if (path->locks[level]) @@ -3986,7 +3925,7 @@ int btrfs_previous_item(struct btrfs_root *root, u32 nritems; int ret; - while(1) { + while (1) { if (path->slots[0] == 0) { ret = btrfs_prev_leaf(root, path); if (ret != 0) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ccea0648e10..eee060f8811 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -126,7 +126,6 @@ struct btrfs_ordered_sum; static int btrfs_csum_sizes[] = { 4, 0 }; /* four bytes for CRC32 */ -//#define BTRFS_CRC32_SIZE 4 #define BTRFS_EMPTY_DIR_SIZE 0 #define BTRFS_FT_UNKNOWN 0 @@ -283,8 +282,8 @@ struct btrfs_header { } __attribute__ ((__packed__)); #define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \ - sizeof(struct btrfs_header)) / \ - sizeof(struct btrfs_key_ptr)) + sizeof(struct btrfs_header)) / \ + sizeof(struct btrfs_key_ptr)) #define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header)) #define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->leafsize)) #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ @@ -1512,7 +1511,7 @@ static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb) static inline int btrfs_is_leaf(struct extent_buffer *eb) { - return (btrfs_header_level(eb) == 0); + return btrfs_header_level(eb) == 0; } /* struct btrfs_root_item */ @@ -1597,8 +1596,8 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) /* struct btrfs_file_extent_item */ BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); -static inline unsigned long btrfs_file_extent_inline_start(struct - btrfs_file_extent_item *e) +static inline unsigned long +btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) { unsigned long offset = (unsigned long)e; offset += offsetof(struct btrfs_file_extent_item, disk_bytenr); @@ -1660,20 +1659,20 @@ static inline int btrfs_set_root_name(struct btrfs_root *root, const char *name, int len) { /* if we already have a name just free it */ - if (root->name) - kfree(root->name); + 
kfree(root->name); root->name = kmalloc(len+1, GFP_KERNEL); if (!root->name) return -ENOMEM; memcpy(root->name, name, len); - root->name[len] ='\0'; + root->name[len] = '\0'; return 0; } -static inline u32 btrfs_level_size(struct btrfs_root *root, int level) { +static inline u32 btrfs_level_size(struct btrfs_root *root, int level) +{ if (level == 0) return root->leafsize; return root->nodesize; @@ -1707,9 +1706,9 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, int btrfs_extent_post_op(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy); -struct btrfs_block_group_cache *btrfs_lookup_block_group(struct - btrfs_fs_info *info, - u64 bytenr); +struct btrfs_block_group_cache *btrfs_lookup_block_group( + struct btrfs_fs_info *info, + u64 bytenr); u64 btrfs_find_block_group(struct btrfs_root *root, u64 search_start, u64 search_hint, int owner); struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, @@ -1908,8 +1907,9 @@ int btrfs_search_root(struct btrfs_root *root, u64 search_start, int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid, struct btrfs_root *latest_root); /* dir-item.c */ -int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root - *root, const char *name, int name_len, u64 dir, +int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, u64 dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index 5040b71f190..926a0b287a7 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -333,7 +333,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, leaf = path->nodes[0]; dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); total_len = btrfs_item_size_nr(leaf, path->slots[0]); - while(cur < total_len) { + while (cur < total_len) { this_len = sizeof(*dir_item) + btrfs_dir_name_len(leaf, dir_item) + btrfs_dir_data_len(leaf, dir_item); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index dae25e78a6b..81a313874ae 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -23,7 +23,7 @@ #include #include #include -#include // for block_sync_page +#include #include #include #include @@ -40,19 +40,6 @@ #include "ref-cache.h" #include "tree-log.h" -#if 0 -static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf) -{ - if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) { - printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n", - (unsigned long long)extent_buffer_blocknr(buf), - (unsigned long long)btrfs_header_blocknr(buf)); - return 1; - } - return 0; -} -#endif - static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); @@ -128,23 +115,13 @@ static struct extent_map *btree_get_extent(struct inode *inode, u64 failed_start = em->start; u64 failed_len = em->len; - printk("failed to insert %Lu %Lu -> %Lu into tree\n", - em->start, em->len, em->block_start); free_extent_map(em); em = lookup_extent_mapping(em_tree, start, len); if (em) { - printk("after failing, found %Lu %Lu %Lu\n", - em->start, em->len, em->block_start); ret = 0; } else { em = lookup_extent_mapping(em_tree, failed_start, failed_len); - if (em) { - printk("double failure lookup gives us " - "%Lu %Lu -> %Lu\n", em->start, - 
em->len, em->block_start); - free_extent_map(em); - } ret = -EIO; } } else if (ret) { @@ -191,15 +168,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, unsigned long inline_result; len = buf->len - offset; - while(len > 0) { + while (len > 0) { err = map_private_extent_buffer(buf, offset, 32, &map_token, &kaddr, &map_start, &map_len, KM_USER0); - if (err) { - printk("failed to map extent buffer! %lu\n", - offset); + if (err) return 1; - } cur_len = min(len, map_len - (offset - map_start)); crc = btrfs_csum_data(root, kaddr + offset - map_start, crc, cur_len); @@ -218,15 +192,14 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, btrfs_csum_final(crc, result); if (verify) { - /* FIXME, this is not good */ if (memcmp_extent_buffer(buf, result, 0, csum_size)) { u32 val; u32 found = 0; memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); - printk("btrfs: %s checksum verify failed on %llu " - "wanted %X found %X level %d\n", + printk(KERN_INFO "btrfs: %s checksum verify failed " + "on %llu wanted %X found %X level %d\n", root->fs_info->sb->s_id, buf->start, val, found, btrfs_header_level(buf)); if (result != (char *)&inline_result) @@ -293,7 +266,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, if (!ret && !verify_parent_transid(io_tree, eb, parent_transid)) return ret; -printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num); + num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, eb->start, eb->len); if (num_copies == 1) @@ -307,9 +280,10 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror } /* - * checksum a dirty tree block before IO. This has extra checks to make - * sure we only fill in the checksum field in the first page of a multi-page block + * checksum a dirty tree block before IO. 
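
The verify branch of csum_tree_block(), just above, follows the canonical pattern: recompute crc32c over the block past the checksum field and memcmp against the stored bytes. A self-contained userspace sketch of that pattern, using a plain bitwise crc32c (the kernel uses an accelerated implementation, and btrfs's exact on-disk field layout differs from this toy block):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
{
	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	uint8_t block[64] = { 0 };	/* first 4 bytes hold the stored csum */
	uint32_t csum;

	memcpy(block + 4, "tree block payload", 18);
	csum = crc32c(0, block + 4, sizeof(block) - 4);
	memcpy(block, &csum, sizeof(csum));	/* write side */

	/* verify side: recompute and compare with the stored bytes */
	csum = crc32c(0, block + 4, sizeof(block) - 4);
	if (memcmp(block, &csum, sizeof(csum)))
		printf("checksum verify failed\n");
	else
		printf("checksum ok\n");
	return 0;
}
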
This has extra checks to make sure + * we only fill in the checksum field in the first page of a multi-page block */ + static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { struct extent_io_tree *tree; @@ -327,28 +301,22 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) if (!page->private) goto out; len = page->private >> 2; - if (len == 0) { - WARN_ON(1); - } + WARN_ON(len == 0); + eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, btrfs_header_generation(eb)); BUG_ON(ret); found_start = btrfs_header_bytenr(eb); if (found_start != start) { - printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n", - start, found_start, len); WARN_ON(1); goto err; } if (eb->first_page != page) { - printk("bad first page %lu %lu\n", eb->first_page->index, - page->index); WARN_ON(1); goto err; } if (!PageUptodate(page)) { - printk("csum not up to date page %lu\n", page->index); WARN_ON(1); goto err; } @@ -396,29 +364,30 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, goto out; if (!page->private) goto out; + len = page->private >> 2; - if (len == 0) { - WARN_ON(1); - } + WARN_ON(len == 0); + eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); found_start = btrfs_header_bytenr(eb); if (found_start != start) { - printk("bad tree block start %llu %llu\n", + printk(KERN_INFO "btrfs bad tree block start %llu %llu\n", (unsigned long long)found_start, (unsigned long long)eb->start); ret = -EIO; goto err; } if (eb->first_page != page) { - printk("bad first page %lu %lu\n", eb->first_page->index, - page->index); + printk(KERN_INFO "btrfs bad first page %lu %lu\n", + eb->first_page->index, page->index); WARN_ON(1); ret = -EIO; goto err; } if (check_tree_block_fsid(root, eb)) { - printk("bad fsid on block %Lu\n", eb->start); + printk(KERN_INFO "btrfs bad fsid on block %llu\n", + (unsigned long long)eb->start); ret = -EIO; goto err; } @@ -578,7 +547,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, HZ/10); } #endif - while(atomic_read(&fs_info->async_submit_draining) && + while (atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->nr_async_submits)) { wait_event(fs_info->async_submit_wait, (atomic_read(&fs_info->nr_async_submits) == 0)); @@ -594,7 +563,7 @@ static int btree_csum_one_bio(struct bio *bio) struct btrfs_root *root; WARN_ON(bio->bi_vcnt <= 0); - while(bio_index < bio->bi_vcnt) { + while (bio_index < bio->bi_vcnt) { root = BTRFS_I(bvec->bv_page->mapping->host)->root; csum_dirty_buffer(root, bvec->bv_page); bio_index++; @@ -680,9 +649,8 @@ static int btree_writepages(struct address_space *mapping, num_dirty = count_range_bits(tree, &start, (u64)-1, thresh, EXTENT_DIRTY); - if (num_dirty < thresh) { + if (num_dirty < thresh) return 0; - } } return extent_writepages(tree, mapping, btree_get_extent, wbc); } @@ -701,15 +669,14 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags) int ret; if (PageWriteback(page) || PageDirty(page)) - return 0; + return 0; tree = &BTRFS_I(page->mapping->host)->io_tree; map = &BTRFS_I(page->mapping->host)->extent_tree; ret = try_release_extent_state(map, tree, page, gfp_flags); - if (!ret) { + if (!ret) return 0; - } ret = try_release_extent_buffer(tree, page); if (ret == 1) { @@ -728,8 +695,8 @@ static void btree_invalidatepage(struct page *page, unsigned long offset) extent_invalidatepage(tree, page, offset); btree_releasepage(page, GFP_NOFS); if 
(PagePrivate(page)) { - printk("warning page private not zero on page %Lu\n", - page_offset(page)); + printk(KERN_WARNING "btrfs warning page private not zero " + "on page %llu\n", (unsigned long long)page_offset(page)); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); @@ -813,7 +780,7 @@ int btrfs_write_tree_block(struct extent_buffer *buf) int btrfs_wait_tree_block_writeback(struct extent_buffer *buf) { return btrfs_wait_on_page_writeback_range(buf->first_page->mapping, - buf->start, buf->start + buf->len -1); + buf->start, buf->start + buf->len - 1); } struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, @@ -832,11 +799,10 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); - if (ret == 0) { + if (ret == 0) buf->flags |= EXTENT_UPTODATE; - } else { + else WARN_ON(1); - } return buf; } @@ -944,7 +910,7 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, if (!log_root_tree) return 0; - while(1) { + while (1) { ret = find_first_extent_bit(&log_root_tree->dirty_log_pages, 0, &start, &end, EXTENT_DIRTY); if (ret) @@ -1165,24 +1131,6 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, root->in_sysfs = 1; return root; } -#if 0 -static int add_hasher(struct btrfs_fs_info *info, char *type) { - struct btrfs_hasher *hasher; - - hasher = kmalloc(sizeof(*hasher), GFP_NOFS); - if (!hasher) - return -ENOMEM; - hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC); - if (!hasher->hash_tfm) { - kfree(hasher); - return -EINVAL; - } - spin_lock(&info->hash_lock); - list_add(&hasher->list, &info->hashers); - spin_unlock(&info->hash_lock); - return 0; -} -#endif static int btrfs_congested_fn(void *congested_data, int bdi_bits) { @@ -1226,9 +1174,8 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) continue; bdi = blk_get_backing_dev_info(device->bdev); - if (bdi->unplug_io_fn) { + if (bdi->unplug_io_fn) bdi->unplug_io_fn(bdi, page); - } } } @@ -1420,8 +1367,9 @@ static int transaction_kthread(void *arg) mutex_lock(&root->fs_info->transaction_kthread_mutex); if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) { - printk("btrfs: total reference cache size %Lu\n", - root->fs_info->total_ref_cache_size); + printk(KERN_INFO "btrfs: total reference cache " + "size %llu\n", + root->fs_info->total_ref_cache_size); } mutex_lock(&root->fs_info->trans_mutex); @@ -1592,14 +1540,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, atomic_set(&fs_info->tree_log_writers, 0); fs_info->tree_log_transid = 0; -#if 0 - ret = add_hasher(fs_info, "crc32c"); - if (ret) { - printk("btrfs: failed hash setup, modprobe cryptomgr?\n"); - err = -ENOMEM; - goto fail_iput; - } -#endif __setup_root(4096, 4096, 4096, 4096, tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); @@ -1720,7 +1660,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC, sizeof(disk_super->magic))) { - printk("btrfs: valid FS not found on %s\n", sb->s_id); + printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id); goto fail_sb_buffer; } @@ -1728,8 +1668,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { - printk("btrfs: failed to read the system array on %s\n", - sb->s_id); + printk(KERN_WARNING "btrfs: failed to read the system " + "array on %s\n", sb->s_id); goto fail_sys_array; } @@ 
-1746,14 +1686,15 @@ struct btrfs_root *open_ctree(struct super_block *sb, BUG_ON(!chunk_root->node); read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, - (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), - BTRFS_UUID_SIZE); + (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), + BTRFS_UUID_SIZE); mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_chunk_tree(chunk_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { - printk("btrfs: failed to read chunk tree on %s\n", sb->s_id); + printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n", + sb->s_id); goto fail_chunk_root; } @@ -1812,7 +1753,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, u64 bytenr = btrfs_super_log_root(disk_super); if (fs_devices->rw_devices == 0) { - printk("Btrfs log replay required on RO media\n"); + printk(KERN_WARNING "Btrfs log replay required " + "on RO media\n"); err = -EIO; goto fail_trans_kthread; } @@ -2097,7 +2039,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) total_errors++; } if (total_errors > max_errors) { - printk("btrfs: %d errors while writing supers\n", total_errors); + printk(KERN_ERR "btrfs: %d errors while writing supers\n", + total_errors); BUG(); } @@ -2114,7 +2057,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) total_errors++; } if (total_errors > max_errors) { - printk("btrfs: %d errors while writing supers\n", total_errors); + printk(KERN_ERR "btrfs: %d errors while writing supers\n", + total_errors); BUG(); } return 0; @@ -2137,16 +2081,11 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) down_write(&root->anon_super.s_umount); kill_anon_super(&root->anon_super); } -#if 0 - if (root->in_sysfs) - btrfs_sysfs_del_root(root); -#endif if (root->node) free_extent_buffer(root->node); if (root->commit_root) free_extent_buffer(root->commit_root); - if (root->name) - kfree(root->name); + kfree(root->name); kfree(root); return 0; } @@ -2157,7 +2096,7 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info) struct btrfs_root *gang[8]; int i; - while(1) { + while (1) { ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, 0, ARRAY_SIZE(gang)); @@ -2228,18 +2167,17 @@ int close_ctree(struct btrfs_root *root) if (!(fs_info->sb->s_flags & MS_RDONLY)) { ret = btrfs_commit_super(root); - if (ret) { - printk("btrfs: commit super returns %d\n", ret); - } + if (ret) + printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } if (fs_info->delalloc_bytes) { - printk("btrfs: at unmount delalloc count %Lu\n", + printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", fs_info->delalloc_bytes); } if (fs_info->total_ref_cache_size) { - printk("btrfs: at umount reference cache size %Lu\n", - fs_info->total_ref_cache_size); + printk(KERN_INFO "btrfs: at umount reference cache size %llu\n", + (unsigned long long)fs_info->total_ref_cache_size); } if (fs_info->extent_root->node) @@ -2248,13 +2186,13 @@ int close_ctree(struct btrfs_root *root) if (fs_info->tree_root->node) free_extent_buffer(fs_info->tree_root->node); - if (root->fs_info->chunk_root->node); + if (root->fs_info->chunk_root->node) free_extent_buffer(root->fs_info->chunk_root->node); - if (root->fs_info->dev_root->node); + if (root->fs_info->dev_root->node) free_extent_buffer(root->fs_info->dev_root->node); - if (root->fs_info->csum_root->node); + if (root->fs_info->csum_root->node) free_extent_buffer(root->fs_info->csum_root->node); btrfs_free_block_groups(root->fs_info); @@ -2273,7 +2211,7 @@ int close_ctree(struct 
btrfs_root *root) btrfs_stop_workers(&fs_info->submit_workers); #if 0 - while(!list_empty(&fs_info->hashers)) { + while (!list_empty(&fs_info->hashers)) { struct btrfs_hasher *hasher; hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher, hashers); @@ -2324,9 +2262,11 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf) WARN_ON(!btrfs_tree_locked(buf)); if (transid != root->fs_info->generation) { - printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n", + printk(KERN_CRIT "btrfs transid mismatch buffer %llu, " + "found %llu running %llu\n", (unsigned long long)buf->start, - transid, root->fs_info->generation); + (unsigned long long)transid, + (unsigned long long)root->fs_info->generation); WARN_ON(1); } set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf); @@ -2361,9 +2301,8 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; int ret; ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); - if (ret == 0) { + if (ret == 0) buf->flags |= EXTENT_UPTODATE; - } return ret; } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 48b82cd7583..85315d2c90d 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -7,9 +7,11 @@ #include "export.h" #include "compat.h" -#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, parent_objectid)/4) -#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, parent_root_objectid)/4) -#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid)/4) +#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \ + parent_objectid) / 4) +#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, \ + parent_root_objectid) / 4) +#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4) static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, int connectable) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 780c1eeb829..ec43fa526d7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -49,10 +49,10 @@ struct pending_extent_op { int del; }; -static int finish_current_insert(struct btrfs_trans_handle *trans, struct - btrfs_root *extent_root, int all); -static int del_pending_extents(struct btrfs_trans_handle *trans, struct - btrfs_root *extent_root, int all); +static int finish_current_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, int all); +static int del_pending_extents(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, int all); static int pin_down_bytes(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, int is_data); @@ -247,7 +247,7 @@ static int cache_block_group(struct btrfs_root *root, if (ret < 0) goto err; - while(1) { + while (1) { leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { @@ -292,9 +292,8 @@ err: /* * return the block group that starts at or after bytenr */ -static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct - btrfs_fs_info *info, - u64 bytenr) +static struct btrfs_block_group_cache * +btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr) { struct btrfs_block_group_cache *cache; @@ -306,9 +305,9 @@ static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct /* * return the block group that contains the given bytenr */ -struct btrfs_block_group_cache *btrfs_lookup_block_group(struct - btrfs_fs_info *info, - u64 bytenr) +struct
btrfs_block_group_cache *btrfs_lookup_block_group( + struct btrfs_fs_info *info, + u64 bytenr) { struct btrfs_block_group_cache *cache; @@ -492,7 +491,7 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) * to the key objectid. */ -static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans, +static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, @@ -537,7 +536,7 @@ out: * updates all the backrefs that are pending on update_list for the * extent_root */ -static int noinline update_backrefs(struct btrfs_trans_handle *trans, +static noinline int update_backrefs(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct btrfs_path *path, struct list_head *update_list) @@ -573,9 +572,11 @@ loop: btrfs_ref_generation(leaf, ref) != op->orig_generation || (ref_objectid != op->level && ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) { - printk(KERN_ERR "couldn't find %Lu, parent %Lu, root %Lu, " - "owner %u\n", op->bytenr, op->orig_parent, - ref_root, op->level); + printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, " + "root %llu, owner %u\n", + (unsigned long long)op->bytenr, + (unsigned long long)op->orig_parent, + (unsigned long long)ref_root, op->level); btrfs_print_leaf(extent_root, leaf); BUG(); } @@ -620,7 +621,7 @@ out: return 0; } -static int noinline insert_extents(struct btrfs_trans_handle *trans, +static noinline int insert_extents(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct btrfs_path *path, struct list_head *insert_list, int nr) @@ -781,7 +782,7 @@ static int noinline insert_extents(struct btrfs_trans_handle *trans, return ret; } -static int noinline insert_extent_backref(struct btrfs_trans_handle *trans, +static noinline int insert_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, @@ -840,7 +841,7 @@ out: return ret; } -static int noinline remove_extent_backref(struct btrfs_trans_handle *trans, +static noinline int remove_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path) { @@ -868,7 +869,7 @@ static int noinline remove_extent_backref(struct btrfs_trans_handle *trans, static void btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len) { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL); #else blkdev_issue_discard(bdev, start >> 9, len >> 9); @@ -908,7 +909,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, #endif } -static int noinline free_extents(struct btrfs_trans_handle *trans, +static noinline int free_extents(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct list_head *del_list) { @@ -937,10 +938,11 @@ search: extent_root->root_key.objectid, op->orig_generation, op->level, 1); if (ret) { - printk("Unable to find backref byte nr %Lu root %Lu gen %Lu " - "owner %u\n", op->bytenr, - extent_root->root_key.objectid, op->orig_generation, - op->level); + printk(KERN_ERR "btrfs unable to find backref byte nr %llu " + "root %llu gen %llu owner %u\n", + (unsigned long long)op->bytenr, + (unsigned long long)extent_root->root_key.objectid, + (unsigned long long)op->orig_generation, op->level); btrfs_print_leaf(extent_root, path->nodes[0]); WARN_ON(1); goto out; @@ -1282,7 +1284,9 @@ static int __btrfs_inc_extent_ref(struct 
btrfs_trans_handle *trans, btrfs_item_key_to_cpu(l, &key, path->slots[0]); if (key.objectid != bytenr) { btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]); - printk("wanted %Lu found %Lu\n", bytenr, key.objectid); + printk(KERN_ERR "btrfs wanted %llu found %llu\n", + (unsigned long long)bytenr, + (unsigned long long)key.objectid); BUG(); } BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY); @@ -1353,7 +1357,8 @@ int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans, goto out; if (ret != 0) { btrfs_print_leaf(root, path->nodes[0]); - printk("failed to find block number %Lu\n", bytenr); + printk(KERN_INFO "btrfs failed to find block number %llu\n", + (unsigned long long)bytenr); BUG(); } l = path->nodes[0]; @@ -1738,7 +1743,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - while(1) { + while (1) { cache = NULL; spin_lock(&root->fs_info->block_group_cache_lock); for (n = rb_first(&root->fs_info->block_group_cache_tree); @@ -1921,10 +1926,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, spin_unlock(&space_info->lock); ret = btrfs_alloc_chunk(trans, extent_root, flags); - if (ret) { -printk("space info full %Lu\n", flags); + if (ret) space_info->full = 1; - } out: mutex_unlock(&extent_root->fs_info->chunk_mutex); return ret; @@ -1941,7 +1944,7 @@ static int update_block_group(struct btrfs_trans_handle *trans, u64 old_val; u64 byte_in_group; - while(total) { + while (total) { cache = btrfs_lookup_block_group(info, bytenr); if (!cache) return -1; @@ -2089,7 +2092,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy) int ret; mutex_lock(&root->fs_info->pinned_mutex); - while(1) { + while (1) { ret = find_first_extent_bit(pinned_extents, last, &start, &end, EXTENT_DIRTY); if (ret) @@ -2110,7 +2113,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, int ret; mutex_lock(&root->fs_info->pinned_mutex); - while(1) { + while (1) { ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY); if (ret) @@ -2400,7 +2403,7 @@ static int __free_extent(struct btrfs_trans_handle *trans, if (ret == 0) { struct btrfs_key found_key; extent_slot = path->slots[0]; - while(extent_slot > 0) { + while (extent_slot > 0) { extent_slot--; btrfs_item_key_to_cpu(path->nodes[0], &found_key, extent_slot); @@ -2422,8 +2425,8 @@ static int __free_extent(struct btrfs_trans_handle *trans, &key, path, -1, 1); if (ret) { printk(KERN_ERR "umm, got %d back from search" - ", was looking for %Lu\n", ret, - bytenr); + ", was looking for %llu\n", ret, + (unsigned long long)bytenr); btrfs_print_leaf(extent_root, path->nodes[0]); } BUG_ON(ret); @@ -2432,9 +2435,12 @@ static int __free_extent(struct btrfs_trans_handle *trans, } else { btrfs_print_leaf(extent_root, path->nodes[0]); WARN_ON(1); - printk("Unable to find ref byte nr %Lu root %Lu " - "gen %Lu owner %Lu\n", bytenr, - root_objectid, ref_generation, owner_objectid); + printk(KERN_ERR "btrfs unable to find ref byte nr %llu " + "root %llu gen %llu owner %llu\n", + (unsigned long long)bytenr, + (unsigned long long)root_objectid, + (unsigned long long)ref_generation, + (unsigned long long)owner_objectid); } leaf = path->nodes[0]; @@ -2517,8 +2523,8 @@ static int __free_extent(struct btrfs_trans_handle *trans, * find all the blocks marked as pending in the radix tree and remove * them from the extent map */ -static int del_pending_extents(struct btrfs_trans_handle *trans, struct - btrfs_root *extent_root, int all) +static int del_pending_extents(struct 
btrfs_trans_handle *trans, + struct btrfs_root *extent_root, int all) { int ret; int err = 0; @@ -2539,7 +2545,7 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct again: mutex_lock(&info->extent_ins_mutex); - while(1) { + while (1) { ret = find_first_extent_bit(pending_del, search, &start, &end, EXTENT_WRITEBACK); if (ret) { @@ -2753,7 +2759,7 @@ static u64 stripe_align(struct btrfs_root *root, u64 val) * ins->offset == number of blocks * Any available blocks before search_start are skipped. */ -static int noinline find_free_extent(struct btrfs_trans_handle *trans, +static noinline int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *orig_root, u64 num_bytes, u64 empty_size, u64 search_start, u64 search_end, @@ -2762,7 +2768,7 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, int data) { int ret = 0; - struct btrfs_root * root = orig_root->fs_info->extent_root; + struct btrfs_root *root = orig_root->fs_info->extent_root; u64 total_needed = num_bytes; u64 *last_ptr = NULL; u64 last_wanted = 0; @@ -2995,8 +3001,10 @@ loop_check: *last_ptr = ins->objectid + ins->offset; ret = 0; } else if (!ret) { - printk(KERN_ERR "we were searching for %Lu bytes, num_bytes %Lu," - " loop %d, allowed_alloc %d\n", total_needed, num_bytes, + printk(KERN_ERR "btrfs searching for %llu bytes, " + "num_bytes %llu, loop %d, allowed_alloc %d\n", + (unsigned long long)total_needed, + (unsigned long long)num_bytes, loop, allowed_chunk_alloc); ret = -ENOSPC; } @@ -3012,19 +3020,22 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes) struct btrfs_block_group_cache *cache; struct list_head *l; - printk(KERN_INFO "space_info has %Lu free, is %sfull\n", - info->total_bytes - info->bytes_used - info->bytes_pinned - - info->bytes_reserved, (info->full) ? "" : "not "); + printk(KERN_INFO "space_info has %llu free, is %sfull\n", + (unsigned long long)(info->total_bytes - info->bytes_used - + info->bytes_pinned - info->bytes_reserved), + (info->full) ? 
"" : "not "); down_read(&info->groups_sem); list_for_each(l, &info->block_groups) { cache = list_entry(l, struct btrfs_block_group_cache, list); spin_lock(&cache->lock); - printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used " - "%Lu pinned %Lu reserved\n", - cache->key.objectid, cache->key.offset, - btrfs_block_group_used(&cache->item), - cache->pinned, cache->reserved); + printk(KERN_INFO "block group %llu has %llu bytes, %llu used " + "%llu pinned %llu reserved\n", + (unsigned long long)cache->key.objectid, + (unsigned long long)cache->key.offset, + (unsigned long long)btrfs_block_group_used(&cache->item), + (unsigned long long)cache->pinned, + (unsigned long long)cache->reserved); btrfs_dump_free_space(cache, bytes); spin_unlock(&cache->lock); } @@ -3045,15 +3056,15 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans, if (data) { alloc_profile = info->avail_data_alloc_bits & - info->data_alloc_profile; + info->data_alloc_profile; data = BTRFS_BLOCK_GROUP_DATA | alloc_profile; } else if (root == root->fs_info->chunk_root) { alloc_profile = info->avail_system_alloc_bits & - info->system_alloc_profile; + info->system_alloc_profile; data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile; } else { alloc_profile = info->avail_metadata_alloc_bits & - info->metadata_alloc_profile; + info->metadata_alloc_profile; data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile; } again: @@ -3092,8 +3103,9 @@ again: struct btrfs_space_info *sinfo; sinfo = __find_space_info(root->fs_info, data); - printk("allocation failed flags %Lu, wanted %Lu\n", - data, num_bytes); + printk(KERN_ERR "btrfs allocation failed flags %llu, " + "wanted %llu\n", (unsigned long long)data, + (unsigned long long)num_bytes); dump_space_info(sinfo, num_bytes); BUG(); } @@ -3108,7 +3120,8 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) cache = btrfs_lookup_block_group(root->fs_info, start); if (!cache) { - printk(KERN_ERR "Unable to find block group for %Lu\n", start); + printk(KERN_ERR "Unable to find block group for %llu\n", + (unsigned long long)start); return -ENOSPC; } @@ -3235,10 +3248,12 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, } update_block: - ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0); + ret = update_block_group(trans, root, ins->objectid, + ins->offset, 1, 0); if (ret) { - printk("update block group failed for %Lu %Lu\n", - ins->objectid, ins->offset); + printk(KERN_ERR "btrfs update block group failed for %llu " + "%llu\n", (unsigned long long)ins->objectid, + (unsigned long long)ins->offset); BUG(); } out: @@ -3420,7 +3435,7 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, return 0; } -static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans, +static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_leaf_ref *ref) { @@ -3445,15 +3460,15 @@ static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans, return 0; } -static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len, - u32 *refs) +static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, + u64 len, u32 *refs) { int ret; ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs); BUG_ON(ret); -#if 0 // some debugging code in case we see problems here +#if 0 /* some debugging code in case we see problems here */ /* if the refs count is one, it won't get increased again. 
But * if the ref count is > 1, someone may be decreasing it at * the same time we are. @@ -3474,8 +3489,8 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len free_extent_buffer(eb); } if (*refs == 1) { - printk("block %llu went down to one during drop_snap\n", - (unsigned long long)start); + printk(KERN_ERR "btrfs block %llu went down to one " + "during drop_snap\n", (unsigned long long)start); } } @@ -3489,7 +3504,7 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len * helper function for drop_snapshot, this walks down the tree dropping ref * counts as it goes. */ -static int noinline walk_down_tree(struct btrfs_trans_handle *trans, +static noinline int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level) { @@ -3516,7 +3531,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, /* * walk down to the last node level and free all the leaves */ - while(*level >= 0) { + while (*level >= 0) { WARN_ON(*level < 0); WARN_ON(*level >= BTRFS_MAX_LEVEL); cur = path->nodes[*level]; @@ -3576,10 +3591,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, *level = 0; break; } - if (printk_ratelimit()) { - printk("leaf ref miss for bytenr %llu\n", - (unsigned long long)bytenr); - } } next = btrfs_find_tree_block(root, bytenr, blocksize); if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) { @@ -3641,7 +3652,7 @@ out: * walk_down_tree. The main difference is that it checks reference * counts while tree blocks are locked. */ -static int noinline walk_down_subtree(struct btrfs_trans_handle *trans, +static noinline int walk_down_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level) { @@ -3730,7 +3741,7 @@ out: * to find the first node higher up where we haven't yet gone through * all the slots */ -static int noinline walk_up_tree(struct btrfs_trans_handle *trans, +static noinline int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level, int max_level) @@ -3839,7 +3850,7 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root } } } - while(1) { + while (1) { wret = walk_down_tree(trans, root, path, &level); if (wret > 0) break; @@ -3920,7 +3931,7 @@ static unsigned long calc_ra(unsigned long start, unsigned long last, return min(last, start + nr - 1); } -static int noinline relocate_inode_pages(struct inode *inode, u64 start, +static noinline int relocate_inode_pages(struct inode *inode, u64 start, u64 len) { u64 page_start; @@ -4011,7 +4022,7 @@ out_unlock: return ret; } -static int noinline relocate_data_extent(struct inode *reloc_inode, +static noinline int relocate_data_extent(struct inode *reloc_inode, struct btrfs_key *extent_key, u64 offset) { @@ -4087,7 +4098,7 @@ static int is_cowonly_root(u64 root_objectid) return 0; } -static int noinline __next_ref_path(struct btrfs_trans_handle *trans, +static noinline int __next_ref_path(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct btrfs_ref_path *ref_path, int first_time) @@ -4119,11 +4130,10 @@ walk_down: if (level < ref_path->lowest_level) break; - if (level >= 0) { + if (level >= 0) bytenr = ref_path->nodes[level]; - } else { + else bytenr = ref_path->extent_start; - } BUG_ON(bytenr == 0); parent = ref_path->nodes[level + 1]; @@ -4170,11 +4180,12 @@ walk_up: level = ref_path->current_level; while (level < BTRFS_MAX_LEVEL - 1) { u64 ref_objectid; - 
if (level >= 0) { + + if (level >= 0) bytenr = ref_path->nodes[level]; - } else { + else bytenr = ref_path->extent_start; - } + BUG_ON(bytenr == 0); key.objectid = bytenr; @@ -4299,7 +4310,7 @@ static int btrfs_next_ref_path(struct btrfs_trans_handle *trans, return __next_ref_path(trans, extent_root, ref_path, 0); } -static int noinline get_new_locations(struct inode *reloc_inode, +static noinline int get_new_locations(struct inode *reloc_inode, struct btrfs_key *extent_key, u64 offset, int no_fragment, struct disk_extent **extents, @@ -4420,7 +4431,7 @@ out: return ret; } -static int noinline replace_one_extent(struct btrfs_trans_handle *trans, +static noinline int replace_one_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *extent_key, @@ -4778,7 +4789,7 @@ int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans, return 0; } -static int noinline invalidate_extent_cache(struct btrfs_root *root, +static noinline int invalidate_extent_cache(struct btrfs_root *root, struct extent_buffer *leaf, struct btrfs_block_group_cache *group, struct btrfs_root *target_root) @@ -4826,7 +4837,7 @@ static int noinline invalidate_extent_cache(struct btrfs_root *root, return 0; } -static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans, +static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *leaf, struct btrfs_block_group_cache *group, @@ -5035,7 +5046,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) return 0; } -static int noinline init_reloc_tree(struct btrfs_trans_handle *trans, +static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_root *reloc_root; @@ -5102,7 +5113,7 @@ static int noinline init_reloc_tree(struct btrfs_trans_handle *trans, * tree blocks are shared between reloc trees, so they are also shared * between subvols. 
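 *
 * (A style note that applies to the many hunks like the ones below:
 * kernel coding style prefers function attributes ahead of the return
 * type, i.e.
 *
 *	static noinline int relocate_one_path(void);
 *
 * rather than "static int noinline". Both forms compile; only the
 * keyword placement changes.)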
*/ -static int noinline relocate_one_path(struct btrfs_trans_handle *trans, +static noinline int relocate_one_path(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *first_key, @@ -5199,7 +5210,7 @@ static int noinline relocate_one_path(struct btrfs_trans_handle *trans, return 0; } -static int noinline relocate_tree_block(struct btrfs_trans_handle *trans, +static noinline int relocate_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *first_key, @@ -5217,7 +5228,7 @@ static int noinline relocate_tree_block(struct btrfs_trans_handle *trans, return 0; } -static int noinline del_extent_zero(struct btrfs_trans_handle *trans, +static noinline int del_extent_zero(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct btrfs_path *path, struct btrfs_key *extent_key) @@ -5233,7 +5244,7 @@ out: return ret; } -static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info, +static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info, struct btrfs_ref_path *ref_path) { struct btrfs_key root_key; @@ -5248,7 +5259,7 @@ static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info, return btrfs_read_fs_root_no_name(fs_info, &root_key); } -static int noinline relocate_one_extent(struct btrfs_root *extent_root, +static noinline int relocate_one_extent(struct btrfs_root *extent_root, struct btrfs_path *path, struct btrfs_key *extent_key, struct btrfs_block_group_cache *group, @@ -5276,8 +5287,8 @@ static int noinline relocate_one_extent(struct btrfs_root *extent_root, ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS); if (!ref_path) { - ret = -ENOMEM; - goto out; + ret = -ENOMEM; + goto out; } for (loops = 0; ; loops++) { @@ -5497,7 +5508,7 @@ out: return ret; } -static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info, +static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *group) { struct inode *inode = NULL; @@ -5617,7 +5628,7 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start) block_group = btrfs_lookup_block_group(info, group_start); BUG_ON(!block_group); - printk("btrfs relocating block group %llu flags %llu\n", + printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n", (unsigned long long)block_group->key.objectid, (unsigned long long)block_group->flags); @@ -5649,7 +5660,7 @@ again: btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1); mutex_unlock(&root->fs_info->cleaner_mutex); - while(1) { + while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; @@ -5712,7 +5723,7 @@ next: } if (total_found > 0) { - printk("btrfs found %llu extents in pass %d\n", + printk(KERN_INFO "btrfs found %llu extents in pass %d\n", (unsigned long long)total_found, pass); pass++; if (total_found == skipped && pass > 2) { @@ -5754,7 +5765,7 @@ static int find_first_block_group(struct btrfs_root *root, if (ret < 0) goto out; - while(1) { + while (1) { slot = path->slots[0]; leaf = path->nodes[0]; if (slot >= btrfs_header_nritems(leaf)) { @@ -5825,7 +5836,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (!path) return -ENOMEM; - while(1) { + while (1) { ret = find_first_block_group(root, path, &key); if (ret > 0) { ret = 0; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 0bf7684207a..39edb551dca 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -32,7 +32,7 @@ 
static LIST_HEAD(states); #define LEAK_DEBUG 0 #ifdef LEAK_DEBUG -static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(leak_lock); #endif #define BUFFER_LRU_MAX 64 @@ -81,7 +81,11 @@ void extent_io_exit(void) while (!list_empty(&states)) { state = list_entry(states.next, struct extent_state, leak_list); - printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs)); + printk(KERN_ERR "btrfs state leak: start %llu end %llu " + "state %lu in tree %p refs %d\n", + (unsigned long long)state->start, + (unsigned long long)state->end, + state->state, state->tree, atomic_read(&state->refs)); list_del(&state->leak_list); kmem_cache_free(extent_state_cache, state); @@ -89,7 +93,9 @@ void extent_io_exit(void) while (!list_empty(&buffers)) { eb = list_entry(buffers.next, struct extent_buffer, leak_list); - printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs)); + printk(KERN_ERR "btrfs buffer leak start %llu len %lu " + "refs %d\n", (unsigned long long)eb->start, + eb->len, atomic_read(&eb->refs)); list_del(&eb->leak_list); kmem_cache_free(extent_buffer_cache, eb); } @@ -158,11 +164,11 @@ EXPORT_SYMBOL(free_extent_state); static struct rb_node *tree_insert(struct rb_root *root, u64 offset, struct rb_node *node) { - struct rb_node ** p = &root->rb_node; - struct rb_node * parent = NULL; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; struct tree_entry *entry; - while(*p) { + while (*p) { parent = *p; entry = rb_entry(parent, struct tree_entry, rb_node); @@ -185,13 +191,13 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, struct rb_node **next_ret) { struct rb_root *root = &tree->state; - struct rb_node * n = root->rb_node; + struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *orig_prev = NULL; struct tree_entry *entry; struct tree_entry *prev_entry = NULL; - while(n) { + while (n) { entry = rb_entry(n, struct tree_entry, rb_node); prev = n; prev_entry = entry; @@ -200,14 +206,13 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, n = n->rb_left; else if (offset > entry->end) n = n->rb_right; - else { + else return n; - } } if (prev_ret) { orig_prev = prev; - while(prev && offset > prev_entry->end) { + while (prev && offset > prev_entry->end) { prev = rb_next(prev); prev_entry = rb_entry(prev, struct tree_entry, rb_node); } @@ -217,7 +222,7 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, if (next_ret) { prev_entry = rb_entry(prev, struct tree_entry, rb_node); - while(prev && offset < prev_entry->start) { + while (prev && offset < prev_entry->start) { prev = rb_prev(prev); prev_entry = rb_entry(prev, struct tree_entry, rb_node); } @@ -233,9 +238,8 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree, struct rb_node *ret; ret = __etree_search(tree, offset, &prev, NULL); - if (!ret) { + if (!ret) return prev; - } return ret; } @@ -243,11 +247,11 @@ static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree, u64 offset, struct rb_node *node) { struct rb_root *root = &tree->buffer; - struct rb_node ** p = &root->rb_node; - struct rb_node * parent = NULL; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; struct extent_buffer *eb; - while(*p) { + while (*p) { parent = *p; eb = rb_entry(parent, struct extent_buffer, rb_node); @@ -268,10 +272,10 @@ static struct 
extent_buffer *buffer_search(struct extent_io_tree *tree, u64 offset) { struct rb_root *root = &tree->buffer; - struct rb_node * n = root->rb_node; + struct rb_node *n = root->rb_node; struct extent_buffer *eb; - while(n) { + while (n) { eb = rb_entry(n, struct extent_buffer, rb_node); if (offset < eb->start) n = n->rb_left; @@ -363,7 +367,9 @@ static int insert_state(struct extent_io_tree *tree, struct rb_node *node; if (end < start) { - printk("end < start %Lu %Lu\n", end, start); + printk(KERN_ERR "btrfs end < start %llu %llu\n", + (unsigned long long)end, + (unsigned long long)start); WARN_ON(1); } if (bits & EXTENT_DIRTY) @@ -376,7 +382,10 @@ static int insert_state(struct extent_io_tree *tree, if (node) { struct extent_state *found; found = rb_entry(node, struct extent_state, rb_node); - printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end); + printk(KERN_ERR "btrfs found node %llu %llu on insert of " + "%llu %llu\n", (unsigned long long)found->start, + (unsigned long long)found->end, + (unsigned long long)start, (unsigned long long)end); free_extent_state(state); return -EEXIST; } @@ -412,7 +421,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, if (node) { struct extent_state *found; found = rb_entry(node, struct extent_state, rb_node); - printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end); free_extent_state(prealloc); return -EEXIST; } @@ -661,8 +669,9 @@ static void set_state_bits(struct extent_io_tree *tree, * [start, end] is inclusive * This takes the tree lock. */ -static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, - int exclusive, u64 *failed_start, gfp_t mask) +static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + int bits, int exclusive, u64 *failed_start, + gfp_t mask) { struct extent_state *state; struct extent_state *prealloc = NULL; @@ -763,7 +772,7 @@ again: if (end < last_start) this_end = end; else - this_end = last_start -1; + this_end = last_start - 1; err = insert_state(tree, prealloc, start, this_end, bits); prealloc = NULL; @@ -891,8 +900,8 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, } EXPORT_SYMBOL(set_extent_uptodate); -static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) +static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, + u64 end, gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask); } @@ -904,8 +913,8 @@ static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, 0, NULL, mask); } -static int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) +static int clear_extent_writeback(struct extent_io_tree *tree, u64 start, + u64 end, gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask); } @@ -1025,11 +1034,10 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, * our range starts. */ node = tree_search(tree, start); - if (!node) { + if (!node) goto out; - } - while(1) { + while (1) { state = rb_entry(node, struct extent_state, rb_node); if (state->end >= start && (state->state & bits)) { *start_ret = state->start; @@ -1062,15 +1070,14 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, * our range starts. 
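[Editor's note: tree_search() above returns the exact extent_state when the offset lands inside one, and otherwise falls back to the neighbor that follows, so callers such as find_first_extent_bit() can keep scanning forward from the miss point. A condensed, standalone version of that search-or-next contract over a plain binary search tree; the node layout is illustrative and this is not the kernel rbtree API:

struct range_node {
        unsigned long long start, end;          /* inclusive range */
        struct range_node *left, *right;
};

/*
 * Return the node containing offset, or, on a miss, the first node
 * whose range starts after offset -- the same contract that
 * __etree_search()/tree_search() implement with the prev/rb_next walk.
 */
static struct range_node *search_or_next(struct range_node *n,
                                         unsigned long long offset)
{
        struct range_node *next = NULL;

        while (n) {
                if (offset < n->start) {
                        next = n;       /* candidate successor */
                        n = n->left;
                } else if (offset > n->end) {
                        n = n->right;
                } else {
                        return n;       /* offset inside [start, end] */
                }
        }
        return next;                    /* NULL if offset is past every range */
}
]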
*/ node = tree_search(tree, start); - if (!node) { + if (!node) goto out; - } - while(1) { + while (1) { state = rb_entry(node, struct extent_state, rb_node); - if (state->end >= start && (state->state & bits)) { + if (state->end >= start && (state->state & bits)) return state; - } + node = rb_next(node); if (!node) break; @@ -1108,7 +1115,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree, goto out; } - while(1) { + while (1) { state = rb_entry(node, struct extent_state, rb_node); if (found && (state->start != cur_start || (state->state & EXTENT_BOUNDARY))) { @@ -1150,7 +1157,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode, if (index == locked_page->index && end_index == index) return 0; - while(nr_pages > 0) { + while (nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, min_t(unsigned long, nr_pages, ARRAY_SIZE(pages)), pages); @@ -1186,7 +1193,7 @@ static noinline int lock_delalloc_pages(struct inode *inode, /* skip the page at the start index */ nrpages = end_index - index + 1; - while(nrpages > 0) { + while (nrpages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, min_t(unsigned long, nrpages, ARRAY_SIZE(pages)), pages); @@ -1263,17 +1270,16 @@ again: * pages in order, so we can't process delalloc bytes before * locked_page */ - if (delalloc_start < *start) { + if (delalloc_start < *start) delalloc_start = *start; - } /* * make sure to limit the number of pages we try to lock down * if we're looping. */ - if (delalloc_end + 1 - delalloc_start > max_bytes && loops) { + if (delalloc_end + 1 - delalloc_start > max_bytes && loops) delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1; - } + /* step two, lock all the pages after the page that has start */ ret = lock_delalloc_pages(inode, locked_page, delalloc_start, delalloc_end); @@ -1341,7 +1347,7 @@ int extent_clear_unlock_delalloc(struct inode *inode, if (!(unlock_pages || clear_dirty || set_writeback || end_writeback)) return 0; - while(nr_pages > 0) { + while (nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, min_t(unsigned long, nr_pages, ARRAY_SIZE(pages)), pages); @@ -1384,7 +1390,6 @@ u64 count_range_bits(struct extent_io_tree *tree, int found = 0; if (search_end <= cur_start) { - printk("search_end %Lu start %Lu\n", search_end, cur_start); WARN_ON(1); return 0; } @@ -1399,11 +1404,10 @@ u64 count_range_bits(struct extent_io_tree *tree, * our range starts. 
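[Editor's note: a pattern repeated throughout these printk conversions is dropping the nonstandard %Lu in favor of a KERN_ log level plus the standard %llu with an explicit cast. The cast matters because the kernel's u64 may be typedef'd to unsigned long rather than unsigned long long on some 64-bit architectures, so a bare %llu/u64 pair is a potential format mismatch. A userspace sketch of the same idiom; printf stands in for printk and the typedef is illustrative:

#include <stdio.h>

typedef unsigned long long u64; /* stand-in; the kernel type varies by arch */

static void report_state(u64 start, u64 end)
{
        /* cast explicitly so the format is correct whatever u64 really is */
        printf("btrfs state: start %llu end %llu\n",
               (unsigned long long)start,
               (unsigned long long)end);
}

int main(void)
{
        report_state(0, 4095);
        return 0;
}
]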
*/ node = tree_search(tree, cur_start); - if (!node) { + if (!node) goto out; - } - while(1) { + while (1) { state = rb_entry(node, struct extent_state, rb_node); if (state->start > search_end) break; @@ -1927,19 +1931,15 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, nr = bio_get_nr_vecs(bdev); bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); - if (!bio) { - printk("failed to allocate bio nr %d\n", nr); - } bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; bio->bi_private = tree; - if (bio_ret) { + if (bio_ret) *bio_ret = bio; - } else { + else ret = submit_one_bio(rw, bio, mirror_num, bio_flags); - } return ret; } @@ -2028,13 +2028,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, break; } extent_offset = cur - em->start; - if (extent_map_end(em) <= cur) { -printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur); - } BUG_ON(extent_map_end(em) <= cur); - if (end < cur) { -printk("2bad mapping end %Lu cur %Lu\n", end, cur); - } BUG_ON(end < cur); if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) @@ -2199,7 +2193,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, delalloc_end = 0; page_started = 0; if (!epd->extent_locked) { - while(delalloc_end < page_end) { + while (delalloc_end < page_end) { nr_delalloc = find_lock_delalloc_range(inode, tree, page, &delalloc_start, @@ -2242,9 +2236,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, nr_written++; end = page_end; - if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { - printk("found delalloc bits after lock_extent\n"); - } + if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) + printk(KERN_ERR "btrfs delalloc bits after lock_extent\n"); if (last_byte <= start) { clear_extent_dirty(tree, start, page_end, GFP_NOFS); @@ -2297,7 +2290,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); - unlock_extent(tree, unlock_start, cur + iosize -1, + unlock_extent(tree, unlock_start, cur + iosize - 1, GFP_NOFS); /* @@ -2344,9 +2337,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, set_range_writeback(tree, cur, cur + iosize - 1); if (!PageWriteback(page)) { - printk("warning page %lu not writeback, " - "cur %llu end %llu\n", page->index, - (unsigned long long)cur, + printk(KERN_ERR "btrfs warning page %lu not " + "writeback, cur %llu end %llu\n", + page->index, (unsigned long long)cur, (unsigned long long)end); } @@ -2430,8 +2423,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, retry: while (!done && (index <= end) && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, - PAGECACHE_TAG_DIRTY, - min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { + PAGECACHE_TAG_DIRTY, min(end - index, + (pgoff_t)PAGEVEC_SIZE-1) + 1))) { unsigned i; scanned = 1; @@ -2536,9 +2529,8 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, extent_write_cache_pages(tree, mapping, &wbc_writepages, __extent_writepage, &epd, flush_write_bio); - if (epd.bio) { + if (epd.bio) submit_one_bio(WRITE, epd.bio, 0, 0); - } return ret; } EXPORT_SYMBOL(extent_write_full_page); @@ -2568,7 +2560,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, .range_end = end + 1, }; - while(start <= end) { + while (start <= end) { page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); if 
(clear_page_dirty_for_io(page)) ret = __extent_writepage(page, &wbc_writepages, &epd); @@ -2606,9 +2598,8 @@ int extent_writepages(struct extent_io_tree *tree, ret = extent_write_cache_pages(tree, mapping, wbc, __extent_writepage, &epd, flush_write_bio); - if (epd.bio) { + if (epd.bio) submit_one_bio(WRITE, epd.bio, 0, 0); - } return ret; } EXPORT_SYMBOL(extent_writepages); @@ -2666,7 +2657,7 @@ int extent_invalidatepage(struct extent_io_tree *tree, u64 end = start + PAGE_CACHE_SIZE - 1; size_t blocksize = page->mapping->host->i_sb->s_blocksize; - start += (offset + blocksize -1) & ~(blocksize - 1); + start += (offset + blocksize - 1) & ~(blocksize - 1); if (start > end) return 0; @@ -2727,12 +2718,12 @@ int extent_prepare_write(struct extent_io_tree *tree, orig_block_start = block_start; lock_extent(tree, page_start, page_end, GFP_NOFS); - while(block_start <= block_end) { + while (block_start <= block_end) { em = get_extent(inode, page, page_offset, block_start, block_end - block_start + 1, 1); - if (IS_ERR(em) || !em) { + if (IS_ERR(em) || !em) goto err; - } + cur_end = min(block_end, extent_map_end(em) - 1); block_off_start = block_start & (PAGE_CACHE_SIZE - 1); block_off_end = block_off_start + blocksize; @@ -3170,7 +3161,7 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, } __set_page_dirty_nobuffers(extent_buffer_page(eb, i)); set_extent_dirty(tree, page_offset(page), - page_offset(page) + PAGE_CACHE_SIZE -1, + page_offset(page) + PAGE_CACHE_SIZE - 1, GFP_NOFS); unlock_page(page); } @@ -3235,7 +3226,7 @@ int extent_range_uptodate(struct extent_io_tree *tree, ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1); if (ret) return 1; - while(start <= end) { + while (start <= end) { index = start >> PAGE_CACHE_SHIFT; page = find_get_page(tree->mapping, index); uptodate = PageUptodate(page); @@ -3321,16 +3312,12 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, lock_page(page); } locked_pages++; - if (!PageUptodate(page)) { + if (!PageUptodate(page)) all_uptodate = 0; - } } if (all_uptodate) { if (start_i == 0) eb->flags |= EXTENT_UPTODATE; - if (ret) { - printk("all up to date but ret is %d\n", ret); - } goto unlock_exit; } @@ -3345,10 +3332,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, err = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, &bio_flags); - if (err) { + if (err) ret = err; - printk("err %d from __extent_read_full_page\n", ret); - } } else { unlock_page(page); } @@ -3357,26 +3342,23 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, if (bio) submit_one_bio(READ, bio, mirror_num, bio_flags); - if (ret || !wait) { - if (ret) - printk("ret %d wait %d returning\n", ret, wait); + if (ret || !wait) return ret; - } + for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); wait_on_page_locked(page); - if (!PageUptodate(page)) { - printk("page not uptodate after wait_on_page_locked\n"); + if (!PageUptodate(page)) ret = -EIO; - } } + if (!ret) eb->flags |= EXTENT_UPTODATE; return ret; unlock_exit: i = start_i; - while(locked_pages > 0) { + while (locked_pages > 0) { page = extent_buffer_page(eb, i); i++; unlock_page(page); @@ -3403,7 +3385,7 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv, offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); - while(len > 0) { + while (len > 0) { page = extent_buffer_page(eb, i); cur = min(len, (PAGE_CACHE_SIZE - offset)); @@ -3442,8 +3424,11 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, offset 
= 0; *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; } + if (start + min_len > eb->len) { -printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len); + printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, " + "wanted %lu %lu\n", (unsigned long long)eb->start, + eb->len, start, min_len); WARN_ON(1); } @@ -3506,7 +3491,7 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); - while(len > 0) { + while (len > 0) { page = extent_buffer_page(eb, i); cur = min(len, (PAGE_CACHE_SIZE - offset)); @@ -3542,7 +3527,7 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv, offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); - while(len > 0) { + while (len > 0) { page = extent_buffer_page(eb, i); WARN_ON(!PageUptodate(page)); @@ -3574,7 +3559,7 @@ void memset_extent_buffer(struct extent_buffer *eb, char c, offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); - while(len > 0) { + while (len > 0) { page = extent_buffer_page(eb, i); WARN_ON(!PageUptodate(page)); @@ -3607,7 +3592,7 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, offset = (start_offset + dst_offset) & ((unsigned long)PAGE_CACHE_SIZE - 1); - while(len > 0) { + while (len > 0) { page = extent_buffer_page(dst, i); WARN_ON(!PageUptodate(page)); @@ -3674,17 +3659,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, unsigned long src_i; if (src_offset + len > dst->len) { - printk("memmove bogus src_offset %lu move len %lu len %lu\n", - src_offset, len, dst->len); + printk(KERN_ERR "btrfs memmove bogus src_offset %lu move " + "len %lu dst len %lu\n", src_offset, len, dst->len); BUG_ON(1); } if (dst_offset + len > dst->len) { - printk("memmove bogus dst_offset %lu move len %lu len %lu\n", - dst_offset, len, dst->len); + printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move " + "len %lu dst len %lu\n", dst_offset, len, dst->len); BUG_ON(1); } - while(len > 0) { + while (len > 0) { dst_off_in_page = (start_offset + dst_offset) & ((unsigned long)PAGE_CACHE_SIZE - 1); src_off_in_page = (start_offset + src_offset) & @@ -3722,20 +3707,20 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, unsigned long src_i; if (src_offset + len > dst->len) { - printk("memmove bogus src_offset %lu move len %lu len %lu\n", - src_offset, len, dst->len); + printk(KERN_ERR "btrfs memmove bogus src_offset %lu move " + "len %lu len %lu\n", src_offset, len, dst->len); BUG_ON(1); } if (dst_offset + len > dst->len) { - printk("memmove bogus dst_offset %lu move len %lu len %lu\n", - dst_offset, len, dst->len); + printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move " + "len %lu len %lu\n", dst_offset, len, dst->len); BUG_ON(1); } if (dst_offset < src_offset) { memcpy_extent_buffer(dst, dst_offset, src_offset, len); return; } - while(len > 0) { + while (len > 0) { dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT; src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index fd3ebfb8c3c..4a83e33ada3 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -89,11 +89,11 @@ EXPORT_SYMBOL(free_extent_map); static struct rb_node *tree_insert(struct rb_root *root, u64 offset, struct rb_node *node) { - struct rb_node ** p = &root->rb_node; - struct rb_node * parent = NULL; + struct rb_node **p = &root->rb_node; + struct 
rb_node *parent = NULL; struct extent_map *entry; - while(*p) { + while (*p) { parent = *p; entry = rb_entry(parent, struct extent_map, rb_node); @@ -122,13 +122,13 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, struct rb_node **prev_ret, struct rb_node **next_ret) { - struct rb_node * n = root->rb_node; + struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *orig_prev = NULL; struct extent_map *entry; struct extent_map *prev_entry = NULL; - while(n) { + while (n) { entry = rb_entry(n, struct extent_map, rb_node); prev = n; prev_entry = entry; @@ -145,7 +145,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, if (prev_ret) { orig_prev = prev; - while(prev && offset >= extent_map_end(prev_entry)) { + while (prev && offset >= extent_map_end(prev_entry)) { prev = rb_next(prev); prev_entry = rb_entry(prev, struct extent_map, rb_node); } @@ -155,7 +155,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, if (next_ret) { prev_entry = rb_entry(prev, struct extent_map, rb_node); - while(prev && offset < prev_entry->start) { + while (prev && offset < prev_entry->start) { prev = rb_prev(prev); prev_entry = rb_entry(prev, struct extent_map, rb_node); } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index cc6e0b6de94..b11abfad81a 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -24,7 +24,7 @@ #include "transaction.h" #include "print-tree.h" -#define MAX_CSUM_ITEMS(r,size) ((((BTRFS_LEAF_DATA_SIZE(r) - \ +#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \ sizeof(struct btrfs_item) * 2) / \ size) - 1)) int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, @@ -166,7 +166,7 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, WARN_ON(bio->bi_vcnt <= 0); disk_bytenr = (u64)bio->bi_sector << 9; - while(bio_index < bio->bi_vcnt) { + while (bio_index < bio->bi_vcnt) { offset = page_offset(bvec->bv_page) + bvec->bv_offset; ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum); if (ret == 0) @@ -192,8 +192,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, offset + bvec->bv_len - 1, EXTENT_NODATASUM, GFP_NOFS); } else { - printk("no csum found for inode %lu " - "start %llu\n", inode->i_ino, + printk(KERN_INFO "btrfs no csum found " + "for inode %lu start %llu\n", + inode->i_ino, (unsigned long long)offset); } item = NULL; @@ -373,7 +374,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, BUG_ON(!ordered); sums->bytenr = ordered->start; - while(bio_index < bio->bi_vcnt) { + while (bio_index < bio->bi_vcnt) { if (!contig) offset = page_offset(bvec->bv_page) + bvec->bv_offset; @@ -507,7 +508,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); - while(1) { + while (1) { key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.offset = end_byte - 1; key.type = BTRFS_EXTENT_CSUM_KEY; @@ -715,9 +716,8 @@ again: goto csum; diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); - if (diff != csum_size) { + if (diff != csum_size) goto insert; - } ret = btrfs_extend_item(trans, root, path, diff); BUG_ON(ret); @@ -732,7 +732,7 @@ insert: u64 next_sector = sector_sum->bytenr; struct btrfs_sector_sum *next = sector_sum + 1; - while(tmp < sums->len) { + while (tmp < sums->len) { if (next_sector + root->sectorsize != next->bytenr) break; tmp += root->sectorsize; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 5908521922f..0e3a13a4565 100644 --- a/fs/btrfs/file.c +++ 
b/fs/btrfs/file.c @@ -44,10 +44,10 @@ /* simple helper to fault in pages and copy. This should go away * and be replaced with calls into generic code. */ -static int noinline btrfs_copy_from_user(loff_t pos, int num_pages, +static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes, struct page **prepared_pages, - const char __user * buf) + const char __user *buf) { long page_fault = 0; int i; @@ -78,7 +78,7 @@ static int noinline btrfs_copy_from_user(loff_t pos, int num_pages, /* * unlocks pages after btrfs_file_write is done with them */ -static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) +static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) { size_t i; for (i = 0; i < num_pages; i++) { @@ -103,7 +103,7 @@ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) * this also makes the decision about creating an inline extent vs * doing real data extents, marking pages dirty and delalloc as required. */ -static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, +static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct file *file, struct page **pages, @@ -137,9 +137,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, btrfs_set_trans_block_group(trans, inode); hint_byte = 0; - if ((end_of_last_block & 4095) == 0) { - printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block); - } set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS); /* check for reserved extents on each page, we don't want @@ -185,7 +182,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, len = (u64)-1; testend = 0; } - while(1) { + while (1) { if (!split) split = alloc_extent_map(GFP_NOFS); if (!split2) @@ -295,7 +292,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode) path = btrfs_alloc_path(); ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino, last_offset, 0); - while(1) { + while (1) { nritems = btrfs_header_nritems(path->nodes[0]); if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); @@ -314,8 +311,10 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode) if (found_key.offset < last_offset) { WARN_ON(1); btrfs_print_leaf(root, leaf); - printk("inode %lu found offset %Lu expected %Lu\n", - inode->i_ino, found_key.offset, last_offset); + printk(KERN_ERR "inode %lu found offset %llu " + "expected %llu\n", inode->i_ino, + (unsigned long long)found_key.offset, + (unsigned long long)last_offset); err = 1; goto out; } @@ -331,7 +330,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode) extent_end = found_key.offset + btrfs_file_extent_inline_len(leaf, extent); extent_end = (extent_end + root->sectorsize - 1) & - ~((u64)root->sectorsize -1 ); + ~((u64)root->sectorsize - 1); } last_offset = extent_end; path->slots[0]++; @@ -339,8 +338,9 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode) if (0 && last_offset < inode->i_size) { WARN_ON(1); btrfs_print_leaf(root, leaf); - printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino, - last_offset, inode->i_size); + printk(KERN_ERR "inode %lu found offset %llu size %llu\n", + inode->i_ino, (unsigned long long)last_offset, + (unsigned long long)inode->i_size); err = 1; } @@ -362,7 +362,7 @@ out: * inline_limit is used to tell this code which offsets in the file to keep * if they contain inline extents. 
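[Editor's note: the btrfs_check_file() hunk above also tidies the sector-alignment expression (extent_end + root->sectorsize - 1) & ~((u64)root->sectorsize - 1). The idiom rounds a byte offset up to the next sectorsize boundary and is valid only when the alignment is a power of two. A self-contained sketch with sample values:

#include <assert.h>

/* Round len up to the next multiple of align; align must be a power of two. */
static unsigned long long round_up_pow2(unsigned long long len,
                                        unsigned long long align)
{
        return (len + align - 1) & ~(align - 1);
}

int main(void)
{
        assert(round_up_pow2(1, 4096) == 4096);    /* partial sector rounds up */
        assert(round_up_pow2(4096, 4096) == 4096); /* already aligned */
        assert(round_up_pow2(4097, 4096) == 8192); /* spills into the next */
        return 0;
}
]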
*/ -int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, +noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, u64 inline_limit, u64 *hint_byte) { @@ -398,7 +398,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) return -ENOMEM; - while(1) { + while (1) { recow = 0; btrfs_release_path(root, path); ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, @@ -649,16 +649,15 @@ next_slot: if (disk_bytenr != 0) { ret = btrfs_update_extent_ref(trans, root, disk_bytenr, orig_parent, - leaf->start, + leaf->start, root->root_key.objectid, trans->transid, ins.objectid); BUG_ON(ret); } btrfs_release_path(root, path); - if (disk_bytenr != 0) { + if (disk_bytenr != 0) inode_add_bytes(inode, extent_end - end); - } } if (found_extent && !keep) { @@ -944,7 +943,7 @@ done: * waits for data=ordered extents to finish before allowing the pages to be * modified. */ -static int noinline prepare_pages(struct btrfs_root *root, struct file *file, +static noinline int prepare_pages(struct btrfs_root *root, struct file *file, struct page **pages, size_t num_pages, loff_t pos, unsigned long first_index, unsigned long last_index, size_t write_bytes) @@ -979,7 +978,8 @@ again: struct btrfs_ordered_extent *ordered; lock_extent(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, GFP_NOFS); - ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1); + ordered = btrfs_lookup_first_ordered_extent(inode, + last_pos - 1); if (ordered && ordered->file_offset + ordered->len > start_pos && ordered->file_offset < last_pos) { @@ -1085,7 +1085,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, } } - while(count > 0) { + while (count > 0) { size_t offset = pos & (PAGE_CACHE_SIZE - 1); size_t write_bytes = min(count, nrptrs * (size_t)PAGE_CACHE_SIZE - @@ -1178,7 +1178,7 @@ out_nolock: return num_written ? num_written : err; } -int btrfs_release_file(struct inode * inode, struct file * filp) +int btrfs_release_file(struct inode *inode, struct file *filp) { if (filp->private_data) btrfs_ioctl_trans_end(filp); @@ -1237,9 +1237,8 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) } ret = btrfs_log_dentry_safe(trans, root, file->f_dentry); - if (ret < 0) { + if (ret < 0) goto out; - } /* we've logged all the items and now have a consistent * version of the file in the log. It is possible that diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 2e69b9c3043..d1e5f0e84c5 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -213,10 +213,13 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group, info->offset = offset; info->bytes += bytes; } else if (right_info && right_info->offset != offset+bytes) { - printk(KERN_ERR "adding space in the middle of an existing " - "free space area. existing: offset=%Lu, bytes=%Lu. " - "new: offset=%Lu, bytes=%Lu\n", right_info->offset, - right_info->bytes, offset, bytes); + printk(KERN_ERR "btrfs adding space in the middle of an " + "existing free space area. existing: " + "offset=%llu, bytes=%llu. 
new: offset=%llu, " + "bytes=%llu\n", (unsigned long long)right_info->offset, + (unsigned long long)right_info->bytes, + (unsigned long long)offset, + (unsigned long long)bytes); BUG(); } @@ -225,11 +228,14 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group, if (unlikely((left_info->offset + left_info->bytes) != offset)) { - printk(KERN_ERR "free space to the left of new free " - "space isn't quite right. existing: offset=%Lu," - " bytes=%Lu. new: offset=%Lu, bytes=%Lu\n", - left_info->offset, left_info->bytes, offset, - bytes); + printk(KERN_ERR "btrfs free space to the left " + "of new free space isn't " + "quite right. existing: offset=%llu, " + "bytes=%llu. new: offset=%llu, bytes=%llu\n", + (unsigned long long)left_info->offset, + (unsigned long long)left_info->bytes, + (unsigned long long)offset, + (unsigned long long)bytes); BUG(); } @@ -265,8 +271,7 @@ out: BUG(); } - if (alloc_info) - kfree(alloc_info); + kfree(alloc_info); return ret; } @@ -283,9 +288,11 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, if (info && info->offset == offset) { if (info->bytes < bytes) { - printk(KERN_ERR "Found free space at %Lu, size %Lu," - "trying to use %Lu\n", - info->offset, info->bytes, bytes); + printk(KERN_ERR "Found free space at %llu, size %llu," + "trying to use %llu\n", + (unsigned long long)info->offset, + (unsigned long long)info->bytes, + (unsigned long long)bytes); WARN_ON(1); ret = -EINVAL; goto out; @@ -401,8 +408,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, info = rb_entry(n, struct btrfs_free_space, offset_index); if (info->bytes >= bytes) count++; - //printk(KERN_INFO "offset=%Lu, bytes=%Lu\n", info->offset, - // info->bytes); } printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" "\n", count); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 80038c5ef7c..2aa79873eb4 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -129,7 +129,6 @@ int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, last_ino = key.objectid + 1; path->slots[0]++; } - // FIXME -ENOSPC BUG_ON(1); found: btrfs_release_path(root, path); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 068bad46338..1b35ea63b6c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -124,7 +124,7 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, * the btree. 
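[Editor's note: the free-space-cache hunk above drops the if (alloc_info) guard before kfree(). Like C's free(), kfree() is defined to accept NULL and do nothing, so the check is dead weight. A userspace sketch of the same simplification, with free() standing in for kfree():

#include <stdlib.h>

static void release(void *buf)
{
        /* old: if (buf) free(buf);  -- the NULL check adds nothing */
        free(buf);      /* free(NULL) is a defined no-op, as is kfree(NULL) */
}

int main(void)
{
        release(NULL);          /* safe */
        release(malloc(16));    /* also safe */
        return 0;
}
]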
The caller should have done a btrfs_drop_extents so that * no overlapping inline items exist in the btree */ -static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, +static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, size_t size, size_t compressed_size, struct page **compressed_pages) @@ -148,7 +148,8 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, cur_size = compressed_size; } - path = btrfs_alloc_path(); if (!path) + path = btrfs_alloc_path(); + if (!path) return -ENOMEM; btrfs_set_trans_block_group(trans, inode); @@ -165,7 +166,6 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, BUG_ON(ret); if (ret) { err = ret; - printk("got bad ret %d\n", ret); goto fail; } leaf = path->nodes[0]; @@ -181,7 +181,7 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, if (use_compress) { struct page *cpage; int i = 0; - while(compressed_size > 0) { + while (compressed_size > 0) { cpage = compressed_pages[i]; cur_size = min_t(unsigned long, compressed_size, PAGE_CACHE_SIZE); @@ -519,8 +519,7 @@ free_pages_out: WARN_ON(pages[i]->mapping); page_cache_release(pages[i]); } - if (pages) - kfree(pages); + kfree(pages); goto out; } @@ -549,7 +548,7 @@ static noinline int submit_compressed_extents(struct inode *inode, trans = btrfs_join_transaction(root, 1); - while(!list_empty(&async_cow->extents)) { + while (!list_empty(&async_cow->extents)) { async_extent = list_entry(async_cow->extents.next, struct async_extent, list); list_del(&async_extent->list); @@ -562,8 +561,8 @@ static noinline int submit_compressed_extents(struct inode *inode, unsigned long nr_written = 0; lock_extent(io_tree, async_extent->start, - async_extent->start + async_extent->ram_size - 1, - GFP_NOFS); + async_extent->start + + async_extent->ram_size - 1, GFP_NOFS); /* allocate blocks */ cow_file_range(inode, async_cow->locked_page, @@ -581,7 +580,7 @@ static noinline int submit_compressed_extents(struct inode *inode, if (!page_started) extent_write_locked_range(io_tree, inode, async_extent->start, - async_extent->start + + async_extent->start + async_extent->ram_size - 1, btrfs_get_extent, WB_SYNC_ALL); @@ -618,7 +617,7 @@ static noinline int submit_compressed_extents(struct inode *inode, set_bit(EXTENT_FLAG_PINNED, &em->flags); set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); - while(1) { + while (1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); spin_unlock(&em_tree->lock); @@ -651,11 +650,11 @@ static noinline int submit_compressed_extents(struct inode *inode, NULL, 1, 1, 0, 1, 1, 0); ret = btrfs_submit_compressed_write(inode, - async_extent->start, - async_extent->ram_size, - ins.objectid, - ins.offset, async_extent->pages, - async_extent->nr_pages); + async_extent->start, + async_extent->ram_size, + ins.objectid, + ins.offset, async_extent->pages, + async_extent->nr_pages); BUG_ON(ret); trans = btrfs_join_transaction(root, 1); @@ -735,14 +734,13 @@ static noinline int cow_file_range(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); - while(disk_num_bytes > 0) { + while (disk_num_bytes > 0) { cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); ret = btrfs_reserve_extent(trans, root, cur_alloc_size, root->sectorsize, 0, alloc_hint, (u64)-1, &ins, 1); - if (ret) { - BUG(); - } + BUG_ON(ret); + em = alloc_extent_map(GFP_NOFS); em->start = start; em->orig_start = em->start; @@ -755,7 +753,7 @@ 
static noinline int cow_file_range(struct inode *inode, em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); - while(1) { + while (1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); spin_unlock(&em_tree->lock); @@ -779,11 +777,9 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(ret); } - if (disk_num_bytes < cur_alloc_size) { - printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes, - cur_alloc_size); + if (disk_num_bytes < cur_alloc_size) break; - } + /* we're not doing compressed IO, don't unlock the first * page (which the caller expects to stay locked), don't * clear any dirty bits and don't set any writeback bits @@ -842,9 +838,8 @@ static noinline void async_cow_submit(struct btrfs_work *work) waitqueue_active(&root->fs_info->async_submit_wait)) wake_up(&root->fs_info->async_submit_wait); - if (async_cow->inode) { + if (async_cow->inode) submit_compressed_extents(async_cow->inode, async_cow); - } } static noinline void async_cow_free(struct btrfs_work *work) @@ -871,7 +866,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC, 1, 0, GFP_NOFS); - while(start < end) { + while (start < end) { async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); async_cow->inode = inode; async_cow->root = root; @@ -904,7 +899,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, limit)); } - while(atomic_read(&root->fs_info->async_submit_draining) && + while (atomic_read(&root->fs_info->async_submit_draining) && atomic_read(&root->fs_info->async_delalloc_pages)) { wait_event(root->fs_info->async_submit_wait, (atomic_read(&root->fs_info->async_delalloc_pages) == @@ -918,7 +913,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, return 0; } -static int noinline csum_exist_in_range(struct btrfs_root *root, +static noinline int csum_exist_in_range(struct btrfs_root *root, u64 bytenr, u64 num_bytes) { int ret; @@ -1146,13 +1141,13 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, if (btrfs_test_flag(inode, NODATACOW)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 1, nr_written); + page_started, 1, nr_written); else if (btrfs_test_flag(inode, PREALLOC)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 0, nr_written); + page_started, 0, nr_written); else ret = cow_file_range_async(inode, locked_page, start, end, - page_started, nr_written); + page_started, nr_written); return ret; } @@ -1200,8 +1195,11 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, spin_lock(&root->fs_info->delalloc_lock); if (end - start + 1 > root->fs_info->delalloc_bytes) { - printk("warning: delalloc account %Lu %Lu\n", - end - start + 1, root->fs_info->delalloc_bytes); + printk(KERN_INFO "btrfs warning: delalloc account " + "%llu %llu\n", + (unsigned long long)end - start + 1, + (unsigned long long) + root->fs_info->delalloc_bytes); root->fs_info->delalloc_bytes = 0; BTRFS_I(inode)->delalloc_bytes = 0; } else { @@ -1241,9 +1239,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, ret = btrfs_map_block(map_tree, READ, logical, &map_length, NULL, 0); - if (map_length < length + size) { + if (map_length < length + size) return 1; - } return 0; } @@ -1255,8 +1252,9 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, * At IO completion time the cums 
attached on the ordered extent record * are inserted into the btree */ -static int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio, - int mirror_num, unsigned long bio_flags) +static int __btrfs_submit_bio_start(struct inode *inode, int rw, + struct bio *bio, int mirror_num, + unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -1341,9 +1339,8 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) { - if ((end & (PAGE_CACHE_SIZE - 1)) == 0) { + if ((end & (PAGE_CACHE_SIZE - 1)) == 0) WARN_ON(1); - } return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); } @@ -1755,14 +1752,14 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, } local_irq_save(flags); kaddr = kmap_atomic(page, KM_IRQ0); - if (ret) { + if (ret) goto zeroit; - } + csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); btrfs_csum_final(csum, (char *)&csum); - if (csum != private) { + if (csum != private) goto zeroit; - } + kunmap_atomic(kaddr, KM_IRQ0); local_irq_restore(flags); good: @@ -1773,9 +1770,10 @@ good: return 0; zeroit: - printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n", - page->mapping->host->i_ino, (unsigned long long)start, csum, - private); + printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " + "private %llu\n", page->mapping->host->i_ino, + (unsigned long long)start, csum, + (unsigned long long)private); memset(kaddr + offset, 1, end - start + 1); flush_dcache_page(page); kunmap_atomic(kaddr, KM_IRQ0); @@ -2097,9 +2095,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, /* * copy everything in the in-memory inode into the btree. 
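[Editor's note: the btrfs_readpage_end_io_hook() hunk above shows the verify-on-read flow: recompute the checksum over the bytes just read, compare it with the stored value, and on mismatch log the failure and wipe the buffer (the zeroit: path) so stale data is never handed back to the reader. A loose userspace sketch of that flow; the toy checksum below is illustrative only and stands in for the kernel's btrfs_csum_data():

#include <stdio.h>
#include <string.h>

static unsigned int toy_csum(const unsigned char *buf, size_t len)
{
        unsigned int csum = 0;

        while (len--)
                csum = csum * 31 + *buf++;      /* illustrative, not the real csum */
        return csum;
}

static int verify_on_read(unsigned char *buf, size_t len,
                          unsigned int stored, unsigned long ino)
{
        unsigned int csum = toy_csum(buf, len);

        if (csum == stored)
                return 0;       /* data matches its checksum */

        /* mirror the zeroit: path: report, then never leak stale bytes */
        fprintf(stderr, "csum failed ino %lu csum %u stored %u\n",
                ino, csum, stored);
        memset(buf, 0, len);
        return -1;
}

int main(void)
{
        unsigned char page[8] = "btrfs!!";
        unsigned int good = toy_csum(page, sizeof(page));

        return verify_on_read(page, sizeof(page), good, 256) ? 1 : 0;
}
]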
*/ -int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *inode) +noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode) { struct btrfs_inode_item *inode_item; struct btrfs_path *path; @@ -2174,7 +2171,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, inode->i_ino, dir->i_ino, &index); if (ret) { - printk("failed to delete reference to %.*s, " + printk(KERN_INFO "btrfs failed to delete reference to %.*s, " "inode %lu parent %lu\n", name_len, name, inode->i_ino, dir->i_ino); goto err; @@ -2280,9 +2277,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) /* now the directory is empty */ err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, dentry->d_name.name, dentry->d_name.len); - if (!err) { + if (!err) btrfs_i_size_write(inode, 0); - } fail_trans: nr = trans->blocks_used; @@ -2516,9 +2512,9 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, search_again: ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) { + if (ret < 0) goto error; - } + if (ret > 0) { /* there are no items in the tree for us to truncate, we're * done @@ -2530,7 +2526,7 @@ search_again: path->slots[0]--; } - while(1) { + while (1) { fi = NULL; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); @@ -2562,19 +2558,18 @@ search_again: item_end--; } if (item_end < new_size) { - if (found_type == BTRFS_DIR_ITEM_KEY) { + if (found_type == BTRFS_DIR_ITEM_KEY) found_type = BTRFS_INODE_ITEM_KEY; - } else if (found_type == BTRFS_EXTENT_ITEM_KEY) { + else if (found_type == BTRFS_EXTENT_ITEM_KEY) found_type = BTRFS_EXTENT_DATA_KEY; - } else if (found_type == BTRFS_EXTENT_DATA_KEY) { + else if (found_type == BTRFS_EXTENT_DATA_KEY) found_type = BTRFS_XATTR_ITEM_KEY; - } else if (found_type == BTRFS_XATTR_ITEM_KEY) { + else if (found_type == BTRFS_XATTR_ITEM_KEY) found_type = BTRFS_INODE_REF_KEY; - } else if (found_type) { + else if (found_type) found_type--; - } else { + else break; - } btrfs_set_key_type(&key, found_type); goto next; } @@ -2656,7 +2651,7 @@ delete: pending_del_nr++; pending_del_slot = path->slots[0]; } else { - printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot); + BUG(); } } else { break; @@ -2938,9 +2933,10 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, namelen, 0); if (IS_ERR(di)) ret = PTR_ERR(di); - if (!di || IS_ERR(di)) { + + if (!di || IS_ERR(di)) goto out_err; - } + btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); out: btrfs_free_path(path); @@ -3020,8 +3016,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; - return (args->ino == inode->i_ino && - args->root == BTRFS_I(inode)->root); + return args->ino == inode->i_ino && + args->root == BTRFS_I(inode)->root; } struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, @@ -3085,7 +3081,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) { - struct inode * inode; + struct inode *inode; struct btrfs_inode *bi = BTRFS_I(dir); struct btrfs_root *root = bi->root; struct btrfs_root *sub_root = root; @@ -3385,9 +3381,8 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) if (BTRFS_I(dir)->index_cnt == 
(u64)-1) { ret = btrfs_set_inode_index_count(dir); - if (ret) { + if (ret) return ret; - } } *index = BTRFS_I(dir)->index_cnt; @@ -3879,12 +3874,13 @@ static noinline int uncompress_inline(struct btrfs_path *path, /* * a bit scary, this does extent mapping from logical file offset to the disk. - * the ugly parts come from merging extents from the disk with the - * in-ram representation. This gets more complex because of the data=ordered code, + * the ugly parts come from merging extents from the disk with the in-ram + * representation. This gets more complex because of the data=ordered code, * where the in-ram extents might be locked pending data=ordered completion. * * This also copies inline extents directly into the page. */ + struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) @@ -4081,7 +4077,7 @@ again: extent_map_end(em) - 1, GFP_NOFS); goto insert; } else { - printk("unkknown found_type %d\n", found_type); + printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); WARN_ON(1); } not_found: @@ -4093,7 +4089,11 @@ not_found_em: insert: btrfs_release_path(root, path); if (em->start > start || extent_map_end(em) <= start) { - printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len); + printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " + "[%llu %llu]\n", (unsigned long long)em->start, + (unsigned long long)em->len, + (unsigned long long)start, + (unsigned long long)len); err = -EIO; goto out; } @@ -4130,8 +4130,6 @@ insert: } } else { err = -EIO; - printk("failing to insert %Lu %Lu\n", - start, len); free_extent_map(em); em = NULL; } @@ -4147,9 +4145,8 @@ out: btrfs_free_path(path); if (trans) { ret = btrfs_end_transaction(trans, root); - if (!err) { + if (!err) err = ret; - } } if (err) { free_extent_map(em); @@ -4482,13 +4479,15 @@ void btrfs_destroy_inode(struct inode *inode) } spin_unlock(&BTRFS_I(inode)->root->list_lock); - while(1) { + while (1) { ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); if (!ordered) break; else { - printk("found ordered extent %Lu %Lu\n", - ordered->file_offset, ordered->len); + printk(KERN_ERR "btrfs found ordered " + "extent %llu %llu on inode cleanup\n", + (unsigned long long)ordered->file_offset, + (unsigned long long)ordered->len); btrfs_remove_ordered_extent(inode, ordered); btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered); @@ -4572,8 +4571,8 @@ static int btrfs_getattr(struct vfsmount *mnt, return 0; } -static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry, - struct inode * new_dir,struct dentry *new_dentry) +static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) { struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(old_dir)->root; @@ -4663,7 +4662,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) return -EROFS; spin_lock(&root->fs_info->delalloc_lock); - while(!list_empty(head)) { + while (!list_empty(head)) { binode = list_entry(head->next, struct btrfs_inode, delalloc_inodes); inode = igrab(&binode->vfs_inode); @@ -4684,7 +4683,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) * ordered extents get created before we return */ atomic_inc(&root->fs_info->async_submit_draining); - while(atomic_read(&root->fs_info->nr_async_submits) || + while (atomic_read(&root->fs_info->nr_async_submits) || atomic_read(&root->fs_info->async_delalloc_pages)) { 
wait_event(root->fs_info->async_submit_wait, (atomic_read(&root->fs_info->nr_async_submits) == 0 && diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ba484aac1b9..c2aa33e3feb 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -311,7 +311,7 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name, * to see if is references the subvolume where we are * placing this new snapshot. */ - while(1) { + while (1) { if (!test || dir == snap_src->fs_info->sb->s_root || test == snap_src->fs_info->sb->s_root || @@ -319,7 +319,8 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name, break; } if (S_ISLNK(test->d_inode->i_mode)) { - printk("Symlink in snapshot path, failed\n"); + printk(KERN_INFO "Btrfs symlink in snapshot " + "path, failed\n"); error = -EMLINK; btrfs_free_path(path); goto out_drop_write; @@ -329,7 +330,8 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name, ret = btrfs_find_root_ref(snap_src->fs_info->tree_root, path, test_oid, parent_oid); if (ret == 0) { - printk("Snapshot creation failed, looping\n"); + printk(KERN_INFO "Btrfs snapshot creation " + "failed, looping\n"); error = -EMLINK; btrfs_free_path(path); goto out_drop_write; @@ -617,7 +619,8 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, src_inode = src_file->f_path.dentry->d_inode; if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) { - printk("btrfs: Snapshot src from another FS\n"); + printk(KERN_INFO "btrfs: Snapshot src from " + "another FS\n"); ret = -EINVAL; fput(src_file); goto out; @@ -810,9 +813,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ((off + len) & (bs-1))) goto out_unlock; - printk("final src extent is %llu~%llu\n", off, len); - printk("final dst extent is %llu~%llu\n", destoff, len); - /* do any pending delalloc/csum calc on src, one way or another, and lock file content */ while (1) { @@ -883,10 +883,13 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, comp = btrfs_file_extent_compression(leaf, extent); type = btrfs_file_extent_type(leaf, extent); if (type == BTRFS_FILE_EXTENT_REG) { - disko = btrfs_file_extent_disk_bytenr(leaf, extent); - diskl = btrfs_file_extent_disk_num_bytes(leaf, extent); + disko = btrfs_file_extent_disk_bytenr(leaf, + extent); + diskl = btrfs_file_extent_disk_num_bytes(leaf, + extent); datao = btrfs_file_extent_offset(leaf, extent); - datal = btrfs_file_extent_num_bytes(leaf, extent); + datal = btrfs_file_extent_num_bytes(leaf, + extent); } else if (type == BTRFS_FILE_EXTENT_INLINE) { /* take upper bound, may be compressed */ datal = btrfs_file_extent_ram_bytes(leaf, @@ -916,8 +919,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); - printk(" orig disk %llu~%llu data %llu~%llu\n", - disko, diskl, datao, datal); if (off > key.offset) { datao += off - key.offset; @@ -929,8 +930,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, /* disko == 0 means it's a hole */ if (!disko) datao = 0; - printk(" final disk %llu~%llu data %llu~%llu\n", - disko, diskl, datao, datal); btrfs_set_file_extent_offset(leaf, extent, datao); @@ -952,12 +951,11 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, skip = off - key.offset; new_key.offset += skip; } + if (key.offset + datal > off+len) trim = key.offset + datal - (off+len); - printk("len %lld skip %lld trim %lld\n", - datal, skip, trim); + if (comp && (skip || trim)) { - printk("btrfs 
clone_range can't split compressed inline extents yet\n"); ret = -EINVAL; goto out; } @@ -969,7 +967,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, goto out; if (skip) { - u32 start = btrfs_file_extent_calc_inline_size(0); + u32 start = + btrfs_file_extent_calc_inline_size(0); memmove(buf+start, buf+start+skip, datal); } @@ -985,7 +984,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_mark_buffer_dirty(leaf); } - next: +next: btrfs_release_path(root, path); key.offset++; } diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index e30aa6e2958..39bae7761db 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -31,9 +31,10 @@ * difference in almost every workload, but spinning for the right amount of * time needs some help. * - * In general, we want to spin as long as the lock holder is doing btree searches, - * and we should give up if they are in more expensive code. + * In general, we want to spin as long as the lock holder is doing btree + * searches, and we should give up if they are in more expensive code. */ + int btrfs_tree_lock(struct extent_buffer *eb) { int i; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index d9e232227da..a2094017027 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -39,11 +39,11 @@ static u64 entry_end(struct btrfs_ordered_extent *entry) static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, struct rb_node *node) { - struct rb_node ** p = &root->rb_node; - struct rb_node * parent = NULL; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; struct btrfs_ordered_extent *entry; - while(*p) { + while (*p) { parent = *p; entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); @@ -67,13 +67,13 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, struct rb_node **prev_ret) { - struct rb_node * n = root->rb_node; + struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *test; struct btrfs_ordered_extent *entry; struct btrfs_ordered_extent *prev_entry = NULL; - while(n) { + while (n) { entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); prev = n; prev_entry = entry; @@ -88,7 +88,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, if (!prev_ret) return NULL; - while(prev && file_offset >= entry_end(prev_entry)) { + while (prev && file_offset >= entry_end(prev_entry)) { test = rb_next(prev); if (!test) break; @@ -102,7 +102,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, if (prev) prev_entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node); - while(prev && file_offset < entry_end(prev_entry)) { + while (prev && file_offset < entry_end(prev_entry)) { test = rb_prev(prev); if (!test) break; @@ -193,10 +193,8 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, node = tree_insert(&tree->tree, file_offset, &entry->rb_node); - if (node) { - printk("warning dup entry from add_ordered_extent\n"); - BUG(); - } + BUG_ON(node); + set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset, entry_end(entry) - 1, GFP_NOFS); @@ -282,7 +280,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) struct btrfs_ordered_sum *sum; if (atomic_dec_and_test(&entry->refs)) { - while(!list_empty(&entry->list)) { + while (!list_empty(&entry->list)) { cur = entry->list.next; sum = list_entry(cur, struct btrfs_ordered_sum, list); 
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index d9e232227da..a2094017027 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -39,11 +39,11 @@ static u64 entry_end(struct btrfs_ordered_extent *entry)
 static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
 				   struct rb_node *node)
 {
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
 	struct btrfs_ordered_extent *entry;
 
-	while(*p) {
+	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
 
@@ -67,13 +67,13 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
 static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
 				     struct rb_node **prev_ret)
 {
-	struct rb_node * n = root->rb_node;
+	struct rb_node *n = root->rb_node;
 	struct rb_node *prev = NULL;
 	struct rb_node *test;
 	struct btrfs_ordered_extent *entry;
 	struct btrfs_ordered_extent *prev_entry = NULL;
 
-	while(n) {
+	while (n) {
 		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
 		prev = n;
 		prev_entry = entry;
@@ -88,7 +88,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
 	if (!prev_ret)
 		return NULL;
 
-	while(prev && file_offset >= entry_end(prev_entry)) {
+	while (prev && file_offset >= entry_end(prev_entry)) {
 		test = rb_next(prev);
 		if (!test)
 			break;
@@ -102,7 +102,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
 	if (prev)
 		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
 				      rb_node);
-	while(prev && file_offset < entry_end(prev_entry)) {
+	while (prev && file_offset < entry_end(prev_entry)) {
 		test = rb_prev(prev);
 		if (!test)
 			break;
@@ -193,10 +193,8 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 
 	node = tree_insert(&tree->tree, file_offset,
 			   &entry->rb_node);
-	if (node) {
-		printk("warning dup entry from add_ordered_extent\n");
-		BUG();
-	}
+	BUG_ON(node);
+
 	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
 			   entry_end(entry) - 1, GFP_NOFS);
 
@@ -282,7 +280,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 	struct btrfs_ordered_sum *sum;
 
 	if (atomic_dec_and_test(&entry->refs)) {
-		while(!list_empty(&entry->list)) {
+		while (!list_empty(&entry->list)) {
 			cur = entry->list.next;
 			sum = list_entry(cur, struct btrfs_ordered_sum, list);
 			list_del(&sum->list);
@@ -432,11 +430,10 @@ again:
 			   orig_end >> PAGE_CACHE_SHIFT);
 
 	end = orig_end;
-	while(1) {
+	while (1) {
 		ordered = btrfs_lookup_first_ordered_extent(inode, end);
-		if (!ordered) {
+		if (!ordered)
 			break;
-		}
 		if (ordered->file_offset > orig_end) {
 			btrfs_put_ordered_extent(ordered);
 			break;
@@ -492,7 +489,7 @@ out:
  * if none is found
  */
 struct btrfs_ordered_extent *
-btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset)
+btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
@@ -553,7 +550,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 * yet
 	 */
 	node = &ordered->rb_node;
-	while(1) {
+	while (1) {
 		node = rb_prev(node);
 		if (!node)
 			break;
@@ -581,9 +578,8 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 		 * between our ordered extent and the next one.
 		 */
 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-		if (test->file_offset > entry_end(ordered)) {
+		if (test->file_offset > entry_end(ordered))
 			i_size_test = test->file_offset;
-		}
 	} else {
 		i_size_test = i_size_read(inode);
 	}
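tree_insert() and __tree_search() above are the stock kernel rb-tree idiom: walk from the root toward a NULL child slot while remembering the parent, then rb_link_node() and rb_insert_color() to attach and rebalance. A condensed kernel-side sketch of just the insert half, with a simplified payload struct standing in for btrfs_ordered_extent (demo_* names are hypothetical):

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_entry {			/* stand-in for btrfs_ordered_extent */
	u64 file_offset;
	struct rb_node rb_node;
};

static struct rb_node *demo_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct demo_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct demo_entry, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset > entry->file_offset)
			p = &(*p)->rb_right;
		else
			return parent;	/* duplicate; the caller BUG_ON()s this */
	}

	rb_link_node(node, parent, p);	/* attach at the empty slot */
	rb_insert_color(node, root);	/* rebalance */
	return NULL;
}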
"\t\tinode generation %llu size %llu " + "mode %o\n", + (unsigned long long) + btrfs_inode_generation(l, ii), (unsigned long long)btrfs_inode_size(l, ii), btrfs_inode_mode(l, ii)); break; case BTRFS_DIR_ITEM_KEY: di = btrfs_item_ptr(l, i, struct btrfs_dir_item); btrfs_dir_item_key_to_cpu(l, di, &found_key); - printk("\t\tdir oid %llu type %u\n", + printk(KERN_INFO "\t\tdir oid %llu type %u\n", (unsigned long long)found_key.objectid, btrfs_dir_type(l, di)); break; case BTRFS_ROOT_ITEM_KEY: ri = btrfs_item_ptr(l, i, struct btrfs_root_item); - printk("\t\troot data bytenr %llu refs %u\n", - (unsigned long long)btrfs_disk_root_bytenr(l, ri), + printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n", + (unsigned long long) + btrfs_disk_root_bytenr(l, ri), btrfs_disk_root_refs(l, ri)); break; case BTRFS_EXTENT_ITEM_KEY: ei = btrfs_item_ptr(l, i, struct btrfs_extent_item); - printk("\t\textent data refs %u\n", + printk(KERN_INFO "\t\textent data refs %u\n", btrfs_extent_refs(l, ei)); break; case BTRFS_EXTENT_REF_KEY: ref = btrfs_item_ptr(l, i, struct btrfs_extent_ref); - printk("\t\textent back ref root %llu gen %llu " - "owner %llu num_refs %lu\n", + printk(KERN_INFO "\t\textent back ref root %llu " + "gen %llu owner %llu num_refs %lu\n", (unsigned long long)btrfs_ref_root(l, ref), (unsigned long long)btrfs_ref_generation(l, ref), (unsigned long long)btrfs_ref_objectid(l, ref), @@ -114,26 +119,36 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) struct btrfs_file_extent_item); if (btrfs_file_extent_type(l, fi) == BTRFS_FILE_EXTENT_INLINE) { - printk("\t\tinline extent data size %u\n", - btrfs_file_extent_inline_len(l, fi)); + printk(KERN_INFO "\t\tinline extent data " + "size %u\n", + btrfs_file_extent_inline_len(l, fi)); break; } - printk("\t\textent data disk bytenr %llu nr %llu\n", - (unsigned long long)btrfs_file_extent_disk_bytenr(l, fi), - (unsigned long long)btrfs_file_extent_disk_num_bytes(l, fi)); - printk("\t\textent data offset %llu nr %llu ram %llu\n", - (unsigned long long)btrfs_file_extent_offset(l, fi), - (unsigned long long)btrfs_file_extent_num_bytes(l, fi), - (unsigned long long)btrfs_file_extent_ram_bytes(l, fi)); + printk(KERN_INFO "\t\textent data disk bytenr %llu " + "nr %llu\n", + (unsigned long long) + btrfs_file_extent_disk_bytenr(l, fi), + (unsigned long long) + btrfs_file_extent_disk_num_bytes(l, fi)); + printk(KERN_INFO "\t\textent data offset %llu " + "nr %llu ram %llu\n", + (unsigned long long) + btrfs_file_extent_offset(l, fi), + (unsigned long long) + btrfs_file_extent_num_bytes(l, fi), + (unsigned long long) + btrfs_file_extent_ram_bytes(l, fi)); break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, struct btrfs_block_group_item); - printk("\t\tblock group used %llu\n", - (unsigned long long)btrfs_disk_block_group_used(l, bi)); + printk(KERN_INFO "\t\tblock group used %llu\n", + (unsigned long long) + btrfs_disk_block_group_used(l, bi)); break; case BTRFS_CHUNK_ITEM_KEY: - print_chunk(l, btrfs_item_ptr(l, i, struct btrfs_chunk)); + print_chunk(l, btrfs_item_ptr(l, i, + struct btrfs_chunk)); break; case BTRFS_DEV_ITEM_KEY: print_dev_item(l, btrfs_item_ptr(l, i, @@ -142,7 +157,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) case BTRFS_DEV_EXTENT_KEY: dev_extent = btrfs_item_ptr(l, i, struct btrfs_dev_extent); - printk("\t\tdev extent chunk_tree %llu\n" + printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n" "\t\tchunk objectid %llu chunk offset %llu " "length %llu\n", (unsigned long long) @@ -171,13 
+186,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) btrfs_print_leaf(root, c); return; } - printk("node %llu level %d total ptrs %d free spc %u\n", + printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", (unsigned long long)btrfs_header_bytenr(c), btrfs_header_level(c), nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); for (i = 0; i < nr; i++) { btrfs_node_key_to_cpu(c, &key, i); - printk("\tkey %d (%llu %u %llu) block %llu\n", + printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n", i, (unsigned long long)key.objectid, key.type, diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index a50ebb67055..6f0acc4c9ea 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c @@ -74,11 +74,11 @@ void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, struct rb_node *node) { - struct rb_node ** p = &root->rb_node; - struct rb_node * parent = NULL; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; struct btrfs_leaf_ref *entry; - while(*p) { + while (*p) { parent = *p; entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node); @@ -98,10 +98,10 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) { - struct rb_node * n = root->rb_node; + struct rb_node *n = root->rb_node; struct btrfs_leaf_ref *entry; - while(n) { + while (n) { entry = rb_entry(n, struct btrfs_leaf_ref, rb_node); WARN_ON(!entry->in_tree); @@ -127,7 +127,7 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, return 0; spin_lock(&tree->lock); - while(!list_empty(&tree->list)) { + while (!list_empty(&tree->list)) { ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list); BUG_ON(ref->tree != tree); if (ref->root_gen > max_root_gen) diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index f99335a999d..b48650de447 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -132,8 +132,9 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root if (ret != 0) { btrfs_print_leaf(root, path->nodes[0]); - printk("unable to update root key %Lu %u %Lu\n", - key->objectid, key->type, key->offset); + printk(KERN_CRIT "unable to update root key %llu %u %llu\n", + (unsigned long long)key->objectid, key->type, + (unsigned long long)key->offset); BUG_ON(1); } @@ -159,9 +160,9 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root /* * at mount time we want to find all the old transaction snapshots that were in - * the process of being deleted if we crashed. This is any root item with an offset - * lower than the latest root. They need to be queued for deletion to finish - * what was happening when we crashed. + * the process of being deleted if we crashed. This is any root item with an + * offset lower than the latest root. They need to be queued for deletion to + * finish what was happening when we crashed. 
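Two checkpatch rules drive every print-tree.c hunk above: printk() should carry an explicit log level, and u64 values should be cast to unsigned long long before being printed with %llu, because u64 maps to plain unsigned long on some 64-bit architectures and the old %Lu form is non-standard. A minimal before/after sketch (demo function, not btrfs code):

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_print_leaf(u64 bytenr, int nr)
{
	/* old style: no level (printk picks a default), non-portable %Lu */
	/* printk("leaf %Lu total ptrs %d\n", bytenr, nr); */

	/* new style: explicit level, portable cast for %llu */
	printk(KERN_INFO "leaf %llu total ptrs %d\n",
	       (unsigned long long)bytenr, nr);
}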
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index a50ebb67055..6f0acc4c9ea 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -74,11 +74,11 @@ void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 				   struct rb_node *node)
 {
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
 	struct btrfs_leaf_ref *entry;
 
-	while(*p) {
+	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
 
@@ -98,10 +98,10 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 
 static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
 {
-	struct rb_node * n = root->rb_node;
+	struct rb_node *n = root->rb_node;
 	struct btrfs_leaf_ref *entry;
 
-	while(n) {
+	while (n) {
 		entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
 		WARN_ON(!entry->in_tree);
 
@@ -127,7 +127,7 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
 		return 0;
 
 	spin_lock(&tree->lock);
-	while(!list_empty(&tree->list)) {
+	while (!list_empty(&tree->list)) {
 		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
 		BUG_ON(ref->tree != tree);
 		if (ref->root_gen > max_root_gen)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f99335a999d..b48650de447 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -132,8 +132,9 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 
 	if (ret != 0) {
 		btrfs_print_leaf(root, path->nodes[0]);
-		printk("unable to update root key %Lu %u %Lu\n",
-		       key->objectid, key->type, key->offset);
+		printk(KERN_CRIT "unable to update root key %llu %u %llu\n",
+		       (unsigned long long)key->objectid, key->type,
+		       (unsigned long long)key->offset);
 		BUG_ON(1);
 	}
 
@@ -159,9 +160,9 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
 
 /*
  * at mount time we want to find all the old transaction snapshots that were in
- * the process of being deleted if we crashed. This is any root item with an offset
- * lower than the latest root. They need to be queued for deletion to finish
- * what was happening when we crashed.
+ * the process of being deleted if we crashed. This is any root item with an
+ * offset lower than the latest root. They need to be queued for deletion to
+ * finish what was happening when we crashed.
  */
 int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid,
 			  struct btrfs_root *latest)
@@ -188,7 +189,7 @@ again:
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
 		goto err;
-	while(1) {
+	while (1) {
 		leaf = path->nodes[0];
 		nritems = btrfs_header_nritems(leaf);
 		slot = path->slots[0];
@@ -258,11 +259,7 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
 	if (ret < 0)
 		goto out;
-	if (ret) {
-btrfs_print_leaf(root, path->nodes[0]);
-printk("failed to del %Lu %u %Lu\n", key->objectid, key->type, key->offset);
-	}
 	BUG_ON(ret != 0);
 
 	leaf = path->nodes[0];
 	ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index 8d7f568009c..c0f7ecaf1e7 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@ u##bits btrfs_##name(struct extent_buffer *eb,				\
 	unsigned long map_len;						\
 	u##bits res;							\
 	err = map_extent_buffer(eb, offset,				\
-			sizeof(((type *)0)->member),			\
+				sizeof(((type *)0)->member),		\
 			&map_token, &kaddr,				\
 			&map_start, &map_len, KM_USER1);		\
 	if (err) {							\
@@ -103,7 +103,7 @@ void btrfs_set_##name(struct extent_buffer *eb,				\
 	unsigned long map_start;					\
 	unsigned long map_len;						\
 	err = map_extent_buffer(eb, offset,				\
-			sizeof(((type *)0)->member),			\
+				sizeof(((type *)0)->member),		\
 			&map_token, &kaddr,				\
 			&map_start, &map_len, KM_USER1);		\
 	if (err) {							\
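The struct-funcs.c hunks only re-indent, but the expression they line up deserves a note: sizeof(((type *)0)->member) takes a member's size through a null pointer without ever dereferencing it, because sizeof is evaluated at compile time. A standalone illustration (the demo_* names are hypothetical):

#include <stdio.h>

struct demo_item {
	unsigned long long offset;
	unsigned int size;
};

/* size of one member of a struct, no object required */
#define MEMBER_SIZE(type, member) sizeof(((type *)0)->member)

int main(void)
{
	printf("offset is %zu bytes, size is %zu bytes\n",
	       MEMBER_SIZE(struct demo_item, offset),
	       MEMBER_SIZE(struct demo_item, size));
	return 0;
}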
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index ccdcb7bb7ad..b4c101d9322 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -55,18 +55,12 @@
 
 static struct super_operations btrfs_super_ops;
 
-static void btrfs_put_super (struct super_block * sb)
+static void btrfs_put_super(struct super_block *sb)
 {
 	struct btrfs_root *root = btrfs_sb(sb);
 	int ret;
 
 	ret = close_ctree(root);
-	if (ret) {
-		printk("close ctree returns %d\n", ret);
-	}
-#if 0
-	btrfs_sysfs_del_super(root->fs_info);
-#endif
 	sb->s_fs_info = NULL;
 }
 
@@ -299,12 +293,12 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	return error;
 }
 
-static int btrfs_fill_super(struct super_block * sb,
+static int btrfs_fill_super(struct super_block *sb,
 			    struct btrfs_fs_devices *fs_devices,
-			    void * data, int silent)
+			    void *data, int silent)
 {
-	struct inode * inode;
-	struct dentry * root_dentry;
+	struct inode *inode;
+	struct dentry *root_dentry;
 	struct btrfs_super_block *disk_super;
 	struct btrfs_root *tree_root;
 	struct btrfs_inode *bi;
@@ -479,8 +473,10 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
 		root = dget(s->s_root);
 	else {
 		mutex_lock(&s->s_root->d_inode->i_mutex);
-		root = lookup_one_len(subvol_name, s->s_root, strlen(subvol_name));
+		root = lookup_one_len(subvol_name, s->s_root,
+				      strlen(subvol_name));
 		mutex_unlock(&s->s_root->d_inode->i_mutex);
+
 		if (IS_ERR(root)) {
 			up_write(&s->s_umount);
 			deactivate_super(s);
@@ -557,8 +553,9 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_bavail = buf->f_bfree;
 	buf->f_bsize = dentry->d_sb->s_blocksize;
 	buf->f_type = BTRFS_SUPER_MAGIC;
+
 	/* We treat it as constant endianness (it doesn't matter _which_)
-	   because we want the fsid to come out the same whether mounted
+	   because we want the fsid to come out the same whether mounted
 	   on a big-endian or little-endian host */
 	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
 	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
@@ -658,7 +655,7 @@ static int btrfs_interface_init(void)
 static void btrfs_interface_exit(void)
 {
 	if (misc_deregister(&btrfs_misc) < 0)
-		printk("misc_deregister failed for control device");
+		printk(KERN_INFO "misc_deregister failed for control device");
 }
 
 static int __init init_btrfs_fs(void)
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 04087c02084..a240b6fa81d 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -67,7 +67,8 @@ struct btrfs_root_attr {
 };
 
 #define ROOT_ATTR(name, mode, show, store) \
-static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, show, store)
+static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
+							       show, store)
 
 ROOT_ATTR(blocks_used,	0444,	root_blocks_used_show,	NULL);
 ROOT_ATTR(block_limit,	0644,	root_block_limit_show,	NULL);
@@ -86,7 +87,8 @@ struct btrfs_super_attr {
 };
 
 #define SUPER_ATTR(name, mode, show, store) \
-static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, show, store)
+static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
+								 show, store)
 
 SUPER_ATTR(blocks_used,		0444,	super_blocks_used_show,		NULL);
 SUPER_ATTR(total_blocks,	0444,	super_total_blocks_show,	NULL);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4e7b56e9d3a..56ab1f5ea11 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,9 +28,6 @@
 #include "ref-cache.h"
 #include "tree-log.h"
 
-extern struct kmem_cache *btrfs_trans_handle_cachep;
-extern struct kmem_cache *btrfs_transaction_cachep;
-
 #define BTRFS_ROOT_TRANS_TAG 0
 
 static noinline void put_transaction(struct btrfs_transaction *transaction)
@@ -85,10 +82,10 @@ static noinline int join_transaction(struct btrfs_root *root)
 }
 
 /*
- * this does all the record keeping required to make sure that a
- * reference counted root is properly recorded in a given transaction.
- * This is required to make sure the old root from before we joined the transaction
- * is deleted when the transaction commits
+ * this does all the record keeping required to make sure that a reference
+ * counted root is properly recorded in a given transaction.  This is required
+ * to make sure the old root from before we joined the transaction is deleted
+ * when the transaction commits
  */
 noinline int btrfs_record_root_in_trans(struct btrfs_root *root)
 {
@@ -144,7 +141,7 @@ static void wait_current_trans(struct btrfs_root *root)
 	if (cur_trans && cur_trans->blocked) {
 		DEFINE_WAIT(wait);
 		cur_trans->use_count++;
-		while(1) {
+		while (1) {
 			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
 					TASK_UNINTERRUPTIBLE);
 			if (cur_trans->blocked) {
@@ -213,7 +210,7 @@ static noinline int wait_for_commit(struct btrfs_root *root,
 {
 	DEFINE_WAIT(wait);
 	mutex_lock(&root->fs_info->trans_mutex);
-	while(!commit->commit_done) {
+	while (!commit->commit_done) {
 		prepare_to_wait(&commit->commit_wait, &wait,
 				TASK_UNINTERRUPTIBLE);
 		if (commit->commit_done)
@@ -228,8 +225,8 @@ static noinline int wait_for_commit(struct btrfs_root *root,
 }
 
 /*
- * rate limit against the drop_snapshot code. This helps to slow down new operations
- * if the drop_snapshot code isn't able to keep up.
+ * rate limit against the drop_snapshot code. This helps to slow down new
+ * operations if the drop_snapshot code isn't able to keep up.
 */
 static void throttle_on_drops(struct btrfs_root *root)
 {
@@ -332,12 +329,12 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
 	u64 end;
 	unsigned long index;
 
-	while(1) {
+	while (1) {
 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
 					    EXTENT_DIRTY);
 		if (ret)
 			break;
-		while(start <= end) {
+		while (start <= end) {
 			cond_resched();
 
 			index = start >> PAGE_CACHE_SHIFT;
@@ -368,14 +365,14 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
 			page_cache_release(page);
 		}
 	}
-	while(1) {
+	while (1) {
 		ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
 					    EXTENT_DIRTY);
 		if (ret)
 			break;
 
 		clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
-		while(start <= end) {
+		while (start <= end) {
 			index = start >> PAGE_CACHE_SHIFT;
 			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
 			page = find_get_page(btree_inode->i_mapping, index);
@@ -431,7 +428,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 	btrfs_write_dirty_block_groups(trans, root);
 	btrfs_extent_post_op(trans, root);
 
-	while(1) {
+	while (1) {
 		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
 		if (old_root_bytenr == root->node->start)
 			break;
@@ -472,7 +469,7 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
 
 	btrfs_extent_post_op(trans, fs_info->tree_root);
 
-	while(!list_empty(&fs_info->dirty_cowonly_roots)) {
+	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
 		next = fs_info->dirty_cowonly_roots.next;
 		list_del_init(next);
 		root = list_entry(next, struct btrfs_root, dirty_list);
@@ -521,7 +518,7 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
 	int err = 0;
 	u32 refs;
 
-	while(1) {
+	while (1) {
 		ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
 						 ARRAY_SIZE(gang),
 						 BTRFS_ROOT_TRANS_TAG);
@@ -653,7 +650,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 	int ret = 0;
 	int err;
 
-	while(!list_empty(list)) {
+	while (!list_empty(list)) {
 		struct btrfs_root *root;
 
 		dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
@@ -663,13 +660,12 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 
 		root = dirty->latest_root;
 		atomic_inc(&root->fs_info->throttles);
-		while(1) {
+		while (1) {
 			trans = btrfs_start_transaction(tree_root, 1);
 			mutex_lock(&root->fs_info->drop_mutex);
 			ret = btrfs_drop_snapshot(trans, dirty->root);
-			if (ret != -EAGAIN) {
+			if (ret != -EAGAIN)
 				break;
-			}
 			mutex_unlock(&root->fs_info->drop_mutex);
 
 			err = btrfs_update_root(trans,
@@ -874,7 +870,7 @@ static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
 	struct list_head *head = &trans->transaction->pending_snapshots;
 	int ret;
 
-	while(!list_empty(head)) {
+	while (!list_empty(head)) {
 		pending = list_entry(head->next,
 				     struct btrfs_pending_snapshot, list);
 		ret = finish_pending_snapshot(fs_info, pending);
@@ -1076,9 +1072,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
-	if (root->fs_info->closing) {
+	if (root->fs_info->closing)
 		drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
-	}
 	return ret;
 }
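wait_current_trans() and wait_for_commit() in the transaction.c hunks above both use the standard kernel sleep pattern: register on the wait queue with prepare_to_wait(), re-check the condition to close the race with the waker, and only then schedule(). Stripped to its skeleton, with hypothetical demo_wq/demo_done standing in for the btrfs fields:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

static void demo_wait(void)
{
	DEFINE_WAIT(wait);

	while (!demo_done) {
		prepare_to_wait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (demo_done)		/* re-check after queueing: no lost wakeup */
			break;
		schedule();
	}
	finish_wait(&demo_wq, &wait);
}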
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index ffe7f639732..ea292117f88 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -66,9 +66,9 @@ static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
 	trans->block_group = BTRFS_I(inode)->block_group;
 }
 
-static inline void btrfs_update_inode_block_group(struct
-						  btrfs_trans_handle *trans,
-						  struct inode *inode)
+static inline void btrfs_update_inode_block_group(
+					struct btrfs_trans_handle *trans,
+					struct inode *inode)
 {
 	BTRFS_I(inode)->block_group = trans->block_group;
 }
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index a6a3956cedf..3e8358c3616 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -23,10 +23,11 @@
 #include "transaction.h"
 #include "locking.h"
 
-/* defrag all the leaves in a given btree. If cache_only == 1, don't read things
- * from disk, otherwise read all the leaves and try to get key order to
+/* defrag all the leaves in a given btree.  If cache_only == 1, don't read
+ * things from disk, otherwise read all the leaves and try to get key order to
  * better reflect disk order
  */
+
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root, int cache_only)
 {
@@ -65,9 +66,9 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 	level = btrfs_header_level(root->node);
 	orig_level = level;
 
-	if (level == 0) {
+	if (level == 0)
 		goto out;
-	}
+
 	if (root->defrag_progress.objectid == 0) {
 		struct extent_buffer *root_node;
 		u32 nritems;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index b1c2921f5be..3a72a1b6c24 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -829,7 +829,7 @@ conflict_again:
 		 */
 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
-		while(ptr < ptr_end) {
+		while (ptr < ptr_end) {
 			victim_ref = (struct btrfs_inode_ref *)ptr;
 			victim_name_len = btrfs_inode_ref_name_len(leaf,
 								   victim_ref);
@@ -938,9 +938,8 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans,
 
 	file_bytes = (item_size / csum_size) * root->sectorsize;
 	sums = kzalloc(btrfs_ordered_sum_size(root, file_bytes), GFP_NOFS);
-	if (!sums) {
+	if (!sums)
 		return -ENOMEM;
-	}
 
 	INIT_LIST_HEAD(&sums->list);
 	sums->len = file_bytes;
@@ -952,7 +951,7 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans,
 	sector_sum = sums->sums;
 	cur_offset = key->offset;
 	ptr = btrfs_item_ptr_offset(eb, slot);
-	while(item_size > 0) {
+	while (item_size > 0) {
 		sector_sum->bytenr = cur_offset;
 		read_extent_buffer(eb, &sector_sum->sum, ptr, csum_size);
 		sector_sum++;
@@ -995,7 +994,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 
 	path = btrfs_alloc_path();
 
-	while(1) {
+	while (1) {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
 			break;
@@ -1012,7 +1011,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
 		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
 						   path->slots[0]);
-		while(ptr < ptr_end) {
+		while (ptr < ptr_end) {
 			struct btrfs_inode_ref *ref;
 
 			ref = (struct btrfs_inode_ref *)ptr;
@@ -1048,7 +1047,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
 	key.type = BTRFS_ORPHAN_ITEM_KEY;
 	key.offset = (u64)-1;
-	while(1) {
+	while (1) {
 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 		if (ret < 0)
 			break;
@@ -1206,8 +1205,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 	if (key->type == BTRFS_DIR_ITEM_KEY) {
 		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
 				       name, name_len, 1);
-	}
-	else if (key->type == BTRFS_DIR_INDEX_KEY) {
+	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
 		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
 						     key->objectid,
 						     key->offset, name,
@@ -1282,7 +1280,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
 
 	ptr = btrfs_item_ptr_offset(eb, slot);
 	ptr_end = ptr + item_size;
-	while(ptr < ptr_end) {
+	while (ptr < ptr_end) {
 		di = (struct btrfs_dir_item *)ptr;
 		name_len = btrfs_dir_name_len(eb, di);
 		ret = replay_one_name(trans, root, path, eb, di, key);
@@ -1408,7 +1406,7 @@ again:
 	item_size = btrfs_item_size_nr(eb, slot);
 	ptr = btrfs_item_ptr_offset(eb, slot);
 	ptr_end = ptr + item_size;
-	while(ptr < ptr_end) {
+	while (ptr < ptr_end) {
 		di = (struct btrfs_dir_item *)ptr;
 		name_len = btrfs_dir_name_len(eb, di);
 		name = kmalloc(name_len, GFP_NOFS);
@@ -1513,14 +1511,14 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
 again:
 	range_start = 0;
 	range_end = 0;
-	while(1) {
+	while (1) {
 		ret = find_dir_range(log, path, dirid, key_type,
 				     &range_start, &range_end);
 		if (ret != 0)
 			break;
 
 		dir_key.offset = range_start;
-		while(1) {
+		while (1) {
 			int nritems;
 			ret = btrfs_search_slot(NULL, root, &dir_key, path,
 						0, 0);
@@ -1676,7 +1674,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
 	return 0;
 }
 
-static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root,
 				   struct btrfs_path *path, int *level,
 				   struct walk_control *wc)
@@ -1694,7 +1692,7 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
 	WARN_ON(*level < 0);
 	WARN_ON(*level >= BTRFS_MAX_LEVEL);
 
-	while(*level > 0) {
+	while (*level > 0) {
 		WARN_ON(*level < 0);
 		WARN_ON(*level >= BTRFS_MAX_LEVEL);
 		cur = path->nodes[*level];
@@ -1753,11 +1751,11 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
 	WARN_ON(*level < 0);
 	WARN_ON(*level >= BTRFS_MAX_LEVEL);
 
-	if (path->nodes[*level] == root->node) {
+	if (path->nodes[*level] == root->node)
 		parent = path->nodes[*level];
-	} else {
+	else
 		parent = path->nodes[*level + 1];
-	}
+
 	bytenr = path->nodes[*level]->start;
 
 	blocksize = btrfs_level_size(root, *level);
@@ -1790,7 +1788,7 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct btrfs_path *path, int *level,
 				 struct walk_control *wc)
@@ -1801,7 +1799,7 @@ static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans,
 	int slot;
 	int ret;
 
-	for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
+	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
 		slot = path->slots[i];
 		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
 			struct extent_buffer *node;
@@ -1875,7 +1873,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 	extent_buffer_get(log->node);
 	path->slots[level] = 0;
 
-	while(1) {
+	while (1) {
 		wret = walk_down_log_tree(trans, log, path, &level, wc);
 		if (wret > 0)
 			break;
@@ -1941,7 +1939,7 @@ static int wait_log_commit(struct btrfs_root *log)
 		schedule();
 		finish_wait(&log->fs_info->tree_log_wait, &wait);
 		mutex_lock(&log->fs_info->tree_log_mutex);
-	} while(transid == log->fs_info->tree_log_transid &&
+	} while (transid == log->fs_info->tree_log_transid &&
 		atomic_read(&log->fs_info->tree_log_commit));
 	return 0;
 }
@@ -1965,13 +1963,13 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	}
 	atomic_set(&log->fs_info->tree_log_commit, 1);
 
-	while(1) {
+	while (1) {
 		batch = log->fs_info->tree_log_batch;
 		mutex_unlock(&log->fs_info->tree_log_mutex);
 		schedule_timeout_uninterruptible(1);
 		mutex_lock(&log->fs_info->tree_log_mutex);
 
-		while(atomic_read(&log->fs_info->tree_log_writers)) {
+		while (atomic_read(&log->fs_info->tree_log_writers)) {
 			DEFINE_WAIT(wait);
 			prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
 					TASK_UNINTERRUPTIBLE);
@@ -2030,7 +2028,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
 	ret = walk_log_tree(trans, log, &wc);
 	BUG_ON(ret);
 
-	while(1) {
+	while (1) {
 		ret = find_first_extent_bit(&log->dirty_log_pages,
 				    0, &start, &end, EXTENT_DIRTY);
 		if (ret)
@@ -2287,9 +2285,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 		struct btrfs_key tmp;
 		btrfs_item_key_to_cpu(path->nodes[0], &tmp,
 				      path->slots[0]);
-		if (key_type == tmp.type) {
+		if (key_type == tmp.type)
 			first_offset = max(min_offset, tmp.offset) + 1;
-		}
 	}
 	goto done;
 }
@@ -2319,7 +2316,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
 	 * we have a block from this transaction, log every item in it
 	 * from our directory
 	 */
-	while(1) {
+	while (1) {
 		struct btrfs_key tmp;
 		src = path->nodes[0];
 		nritems = btrfs_header_nritems(src);
@@ -2396,7 +2393,7 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
 again:
 	min_key = 0;
 	max_key = 0;
-	while(1) {
+	while (1) {
 		ret = log_dir_items(trans, root, inode, path,
 				    dst_path, key_type, min_key,
 				    &max_key);
@@ -2432,7 +2429,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
 	key.type = max_key_type;
 	key.offset = (u64)-1;
 
-	while(1) {
+	while (1) {
 		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
 
 		if (ret != 1)
@@ -2481,7 +2478,7 @@ static noinline int copy_extent_csums(struct btrfs_trans_handle *trans,
 	list_add_tail(&sums->list, list);
 
 	path = btrfs_alloc_path();
-	while(disk_bytenr < end) {
+	while (disk_bytenr < end) {
 		if (!item || disk_bytenr < item_start_offset ||
 		    disk_bytenr >= item_last_offset) {
 			struct btrfs_key found_key;
@@ -2496,7 +2493,8 @@ static noinline int copy_extent_csums(struct btrfs_trans_handle *trans,
 			if (ret == -ENOENT || ret == -EFBIG)
 				ret = 0;
 			sum = 0;
-			printk("log no csum found for byte %llu\n",
+			printk(KERN_INFO "log no csum found for "
+			       "byte %llu\n",
 			       (unsigned long long)disk_bytenr);
 			item = NULL;
 			btrfs_release_path(root, path);
@@ -2643,7 +2641,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 	 * we have to do this after the loop above to avoid changing the
 	 * log tree while trying to change the log tree.
 	 */
-	while(!list_empty(&ordered_sums)) {
+	while (!list_empty(&ordered_sums)) {
 		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
 						   struct btrfs_ordered_sum,
 						   list);
@@ -2736,7 +2734,7 @@ static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
 	BUG_ON(ret);
 	path->keep_locks = 1;
 
-	while(1) {
+	while (1) {
 		ins_nr = 0;
 		ret = btrfs_search_forward(root, &min_key, &max_key,
 					   path, 0, trans->transid);
@@ -2848,7 +2846,7 @@ int btrfs_log_dentry(struct btrfs_trans_handle *trans,
 	start_log_trans(trans, root);
 	sb = dentry->d_inode->i_sb;
-	while(1) {
+	while (1) {
 		ret = __btrfs_log_inode(trans, root, dentry->d_inode,
 					inode_only);
 		BUG_ON(ret);
@@ -2919,7 +2917,7 @@ again:
 	key.offset = (u64)-1;
 	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
 
-	while(1) {
+	while (1) {
 		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
 		if (ret < 0)
 			break;
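Several of the tree-log.c loops reformatted above (replay_one_dir_item(), fixup_inode_link_count()) share one shape: a single btree item holds a packed run of variable-length records, and the code walks it with a byte cursor until ptr reaches ptr_end. A simplified userspace sketch of that walk, with a hypothetical record layout standing in for btrfs_dir_item / btrfs_inode_ref:

#include <stddef.h>
#include <stdint.h>

struct demo_rec {
	uint16_t name_len;	/* length of the name bytes that follow */
	char name[];		/* not NUL-terminated */
};

static size_t count_records(const uint8_t *ptr, const uint8_t *ptr_end)
{
	size_t n = 0;

	while (ptr < ptr_end) {
		const struct demo_rec *rec = (const struct demo_rec *)ptr;

		/* advance past the fixed header plus the inline name */
		ptr += sizeof(*rec) + rec->name_len;
		n++;
	}
	return n;
}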
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6672adcec9f..b187b537888 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -140,7 +140,7 @@ static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
-static int noinline run_scheduled_bios(struct btrfs_device *device)
+static noinline int run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;
@@ -187,7 +187,7 @@ loop:
 	}
 	spin_unlock(&device->io_lock);
 
-	while(pending) {
+	while (pending) {
 		cur = pending;
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
@@ -458,7 +458,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 
 		bdev = open_bdev_exclusive(device->name, flags, holder);
 		if (IS_ERR(bdev)) {
-			printk("open %s failed\n", device->name);
+			printk(KERN_INFO "open %s failed\n", device->name);
 			goto error;
 		}
 		set_blocksize(bdev, 4096);
@@ -570,14 +570,15 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 	devid = le64_to_cpu(disk_super->dev_item.devid);
 	transid = btrfs_super_generation(disk_super);
 	if (disk_super->label[0])
-		printk("device label %s ", disk_super->label);
+		printk(KERN_INFO "device label %s ", disk_super->label);
 	else {
 		/* FIXME, make a readl uuid parser */
-		printk("device fsid %llx-%llx ",
+		printk(KERN_INFO "device fsid %llx-%llx ",
 		       *(unsigned long long *)disk_super->fsid,
 		       *(unsigned long long *)(disk_super->fsid + 8));
 	}
-	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
+	printk(KERN_INFO "devid %llu transid %llu %s\n",
+	       (unsigned long long)devid, (unsigned long long)transid, path);
 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
 
 	brelse(bh);
@@ -683,9 +684,8 @@ no_more_items:
 			goto check_pending;
 		}
 	}
-	if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
+	if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
 		goto next;
-	}
 
 	start_found = 1;
 	dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
@@ -1001,14 +1001,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 
 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
 	    root->fs_info->fs_devices->rw_devices <= 4) {
-		printk("btrfs: unable to go below four devices on raid10\n");
+		printk(KERN_ERR "btrfs: unable to go below four devices "
+		       "on raid10\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
 	    root->fs_info->fs_devices->rw_devices <= 2) {
-		printk("btrfs: unable to go below two devices on raid1\n");
+		printk(KERN_ERR "btrfs: unable to go below two "
+		       "devices on raid1\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1031,7 +1033,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 		bh = NULL;
 		disk_super = NULL;
 		if (!device) {
-			printk("btrfs: no missing devices found to remove\n");
+			printk(KERN_ERR "btrfs: no missing devices found to "
+			       "remove\n");
 			goto out;
 		}
 	} else {
@@ -1060,7 +1063,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 	}
 
 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
-		printk("btrfs: unable to remove the only writeable device\n");
+		printk(KERN_ERR "btrfs: unable to remove the only writeable "
+		       "device\n");
 		ret = -EINVAL;
 		goto error_brelse;
 	}
@@ -1286,9 +1290,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 		return -EINVAL;
 
 	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
-	if (!bdev) {
+	if (!bdev)
 		return -EIO;
-	}
 
 	if (root->fs_info->fs_devices->seeding) {
 		seeding_dev = 1;
@@ -1401,8 +1404,8 @@ error:
 	goto out;
 }
 
-static int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
-				 struct btrfs_device *device)
+static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+					struct btrfs_device *device)
 {
 	int ret;
 	struct btrfs_path *path;
@@ -1563,7 +1566,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
 	int ret;
 	int i;
 
-	printk("btrfs relocating chunk %llu\n",
+	printk(KERN_INFO "btrfs relocating chunk %llu\n",
 	       (unsigned long long)chunk_offset);
 	root = root->fs_info->chunk_root;
 	extent_root = root->fs_info->extent_root;
@@ -1748,7 +1751,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
 	key.offset = (u64)-1;
 	key.type = BTRFS_CHUNK_ITEM_KEY;
 
-	while(1) {
+	while (1) {
 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
 		if (ret < 0)
 			goto error;
@@ -1916,7 +1919,7 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
+static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
 					int num_stripes, int sub_stripes)
 {
 	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
@@ -2041,7 +2044,7 @@ again:
 	min_free += 1024 * 1024;
 
 	INIT_LIST_HEAD(&private_devs);
-	while(index < num_stripes) {
+	while (index < num_stripes) {
 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
 		BUG_ON(!device->writeable);
 		if (device->total_bytes > device->bytes_used)
@@ -2242,7 +2245,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static int noinline init_first_rw_device(struct btrfs_trans_handle *trans,
+static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
 					 struct btrfs_root *root,
 					 struct btrfs_device *device)
 {
@@ -2338,7 +2341,7 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
 {
 	struct extent_map *em;
 
-	while(1) {
+	while (1) {
 		spin_lock(&tree->map_tree.lock);
 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
 		if (em)
@@ -2413,9 +2416,8 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	int max_errors = 0;
 	struct btrfs_multi_bio *multi = NULL;
 
-	if (multi_ret && !(rw & (1 << BIO_RW))) {
+	if (multi_ret && !(rw & (1 << BIO_RW)))
 		stripes_allocated = 1;
-	}
 again:
 	if (multi_ret) {
 		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
@@ -2434,7 +2436,9 @@ again:
 		return 0;
 
 	if (!em) {
-		printk("unable to find logical %Lu len %Lu\n", logical, *length);
+		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
+		       (unsigned long long)logical,
+		       (unsigned long long)*length);
 		BUG();
 	}
@@ -2541,9 +2545,8 @@ again:
 			device = map->stripes[stripe_index].dev;
 			if (device->bdev) {
 				bdi = blk_get_backing_dev_info(device->bdev);
-				if (bdi->unplug_io_fn) {
+				if (bdi->unplug_io_fn)
 					bdi->unplug_io_fn(bdi, unplug_page);
-				}
 			}
 		} else {
 			multi->stripes[i].physical =
@@ -2717,7 +2720,7 @@ struct async_sched {
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
-static int noinline schedule_bio(struct btrfs_root *root,
+static noinline int schedule_bio(struct btrfs_root *root,
 				 struct btrfs_device *device,
 				 int rw, struct bio *bio)
 {
@@ -2785,8 +2788,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	total_devs = multi->num_stripes;
 
 	if (map_length < length) {
-		printk("mapping failed logical %Lu bio len %Lu "
-		       "len %Lu\n", logical, length, map_length);
+		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
+		       "len %llu\n", (unsigned long long)logical,
+		       (unsigned long long)length,
+		       (unsigned long long)map_length);
 		BUG();
 	}
 	multi->end_io = first_bio->bi_end_io;
@@ -2794,7 +2799,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	multi->orig_bio = first_bio;
 	atomic_set(&multi->stripes_pending, multi->num_stripes);
 
-	while(dev_nr < total_devs) {
+	while (dev_nr < total_devs) {
 		if (total_devs > 1) {
 			if (dev_nr < total_devs - 1) {
 				bio = bio_clone(first_bio, GFP_NOFS);
@@ -3058,7 +3063,8 @@ static int read_one_dev(struct btrfs_root *root,
 			return -EIO;
 
 		if (!device) {
-			printk("warning devid %Lu missing\n", devid);
+			printk(KERN_WARNING "warning devid %llu missing\n",
+			       (unsigned long long)devid);
 			device = add_missing_dev(root, devid, dev_uuid);
 			if (!device)
 				return -ENOMEM;
@@ -3078,12 +3084,6 @@ static int read_one_dev(struct btrfs_root *root,
 	if (device->writeable)
 		device->fs_devices->total_rw_bytes += device->total_bytes;
 	ret = 0;
-#if 0
-	ret = btrfs_open_device(device);
-	if (ret) {
-		kfree(device);
-	}
-#endif
 	return ret;
 }
 
@@ -3174,7 +3174,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
 	key.type = 0;
 again:
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-	while(1) {
+	while (1) {
 		leaf = path->nodes[0];
 		slot = path->slots[0];
 		if (slot >= btrfs_header_nritems(leaf)) {
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 4146f0710e6..7f332e27089 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -264,7 +264,8 @@ struct xattr_handler *btrfs_xattr_handlers[] = {
 */
 static bool btrfs_is_valid_xattr(const char *name)
 {
-	return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
+	return !strncmp(name, XATTR_SECURITY_PREFIX,
+			XATTR_SECURITY_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
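btrfs_is_valid_xattr() above is just four prefix comparisons against the standard xattr namespaces. The same check in plain C, using the literal prefixes that the XATTR_*_PREFIX macros expand to:

#include <string.h>
#include <stdbool.h>

static bool demo_is_valid_xattr(const char *name)
{
	static const char *prefixes[] = {
		"security.", "system.", "trusted.", "user."
	};
	size_t i;

	for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++)
		if (!strncmp(name, prefixes[i], strlen(prefixes[i])))
			return true;
	return false;
}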
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index c4617cde6c7..ecfbce836d3 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -152,7 +152,7 @@ static int free_workspace(struct workspace *workspace)
 static void free_workspaces(void)
 {
 	struct workspace *workspace;
-	while(!list_empty(&idle_workspace)) {
+	while (!list_empty(&idle_workspace)) {
 		workspace = list_entry(idle_workspace.next, struct workspace,
 				       list);
 		list_del(&workspace->list);
@@ -397,12 +397,10 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 		ret = -1;
 		goto out;
 	}
-	while(workspace->inf_strm.total_in < srclen) {
+	while (workspace->inf_strm.total_in < srclen) {
 		ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
-		if (ret != Z_OK && ret != Z_STREAM_END) {
+		if (ret != Z_OK && ret != Z_STREAM_END)
 			break;
-		}
-
 		/*
 		 * buf start is the byte offset we're of the start of
 		 * our workspace buffer
@@ -424,16 +422,14 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 			/* we didn't make progress in this inflate
 			 * call, we're done
 			 */
-			if (ret != Z_STREAM_END) {
+			if (ret != Z_STREAM_END)
 				ret = -1;
-			}
 			break;
 		}
 
 		/* we haven't yet hit data corresponding to this page */
-		if (total_out <= start_byte) {
+		if (total_out <= start_byte)
 			goto next;
-		}
 
 		/*
 		 * the start of the data we care about is offset into
@@ -448,7 +444,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 			current_buf_start = buf_start;
 
 		/* copy bytes from the working buffer into the pages */
-		while(working_bytes > 0) {
+		while (working_bytes > 0) {
 			bytes = min(PAGE_CACHE_SIZE - pg_offset,
 				    PAGE_CACHE_SIZE - buf_offset);
 			bytes = min(bytes, working_bytes);
@@ -471,6 +467,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 					ret = 0;
 					goto done;
 				}
+
 				page_out = bvec[page_out_index].bv_page;
 				pg_offset = 0;
 				page_bytes_left = PAGE_CACHE_SIZE;
@@ -480,9 +477,8 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 				 * make sure our new page is covered by this
 				 * working buffer
 				 */
-				if (total_out <= start_byte) {
+				if (total_out <= start_byte)
 					goto next;
-				}
 
 				/* the next page in the biovec might not
 				 * be adjacent to the last page, but it
@@ -517,11 +513,10 @@ next:
 						     PAGE_CACHE_SIZE);
 		}
 	}
-	if (ret != Z_STREAM_END) {
+	if (ret != Z_STREAM_END)
 		ret = -1;
-	} else {
+	else
 		ret = 0;
-	}
 done:
 	zlib_inflateEnd(&workspace->inf_strm);
 	if (data_in)
@@ -579,16 +574,15 @@ int btrfs_zlib_decompress(unsigned char *data_in,
 		goto out;
 	}
 
-	while(bytes_left > 0) {
+	while (bytes_left > 0) {
 		unsigned long buf_start;
 		unsigned long buf_offset;
 		unsigned long bytes;
 		unsigned long pg_offset = 0;
 
 		ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
-		if (ret != Z_OK && ret != Z_STREAM_END) {
+		if (ret != Z_OK && ret != Z_STREAM_END)
 			break;
-		}
 
 		buf_start = total_out;
 		total_out = workspace->inf_strm.total_out;
@@ -598,15 +592,13 @@ int btrfs_zlib_decompress(unsigned char *data_in,
 			break;
 		}
 
-		if (total_out <= start_byte) {
+		if (total_out <= start_byte)
 			goto next;
-		}
 
-		if (total_out > start_byte && buf_start < start_byte) {
+		if (total_out > start_byte && buf_start < start_byte)
 			buf_offset = start_byte - buf_start;
-		} else {
+		else
 			buf_offset = 0;
-		}
 
 		bytes = min(PAGE_CACHE_SIZE - pg_offset,
 			    PAGE_CACHE_SIZE - buf_offset);
@@ -622,11 +614,12 @@ next:
 		workspace->inf_strm.next_out = workspace->buf;
 		workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
 	}
-	if (ret != Z_STREAM_END && bytes_left != 0) {
+
+	if (ret != Z_STREAM_END && bytes_left != 0)
 		ret = -1;
-	} else {
+	else
 		ret = 0;
-	}
+
 	zlib_inflateEnd(&workspace->inf_strm);
 out:
 	free_workspace(workspace);
--
cgit v1.2.3-70-g09d2
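The two decompress routines in the zlib.c hunks drive the kernel's zlib_inflate() the same way: call inflate with Z_NO_FLUSH, treat anything other than Z_OK or Z_STREAM_END as failure, and keep consuming output until the stream ends. The same control flow against userspace zlib (link with -lz); this uses a single fixed output buffer instead of the page-sized output window above, so it is a sketch of the loop shape, not the btrfs code:

#include <string.h>
#include <zlib.h>

static int demo_inflate(const unsigned char *src, size_t srclen,
			unsigned char *dst, size_t dstlen)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	if (inflateInit(&strm) != Z_OK)
		return -1;

	strm.next_in = (unsigned char *)src;
	strm.avail_in = (uInt)srclen;
	strm.next_out = dst;
	strm.avail_out = (uInt)dstlen;

	do {
		ret = inflate(&strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;		/* corrupt stream or no progress */
	} while (ret != Z_STREAM_END && strm.avail_out > 0);

	inflateEnd(&strm);
	return ret == Z_STREAM_END ? 0 : -1;
}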