author	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-26 14:48:55 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-26 14:48:55 -0700
commit	e2aed8dfa50bb061747eeb14e6af099554a03b76 (patch)
tree	900c96a2dfe7195e56ec3c1f027418029d0a8444
parent	476525004ac7e2f990b6956efcd44d0780c2ab4c (diff)
parent	b24baf6917a376420d535548e1f88744028bcf24 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull large btrfs update from Chris Mason:
 "This pull request is very large, and the two main features in here
  have been under testing/devel for quite a while.

  We have subvolume quotas from the strato developers.  This enables
  full tracking of how many blocks are allocated to each subvolume (and
  all snapshots) and you can set limits on a per-subvolume basis.  You
  can also create quota groups and toss multiple subvolumes into a big
  group.  It's everything you need to be a web hosting company and give
  each user their own subvolume.

  The userland side of the quotas is being refreshed, they'll send out
  details on where to grab it soon.

  Next is the kernel side of btrfs send/receive from Alexander Block.
  This leverages the same infrastructure as the quota code to figure
  out relationships between blocks and their owners.  It can then
  compute the difference between two snapshots and sends the diffs in
  a neutral format into userland.

  The basic model:

        create a snapshot
        send that snapshot as the initial backup
        make changes
        create a second snapshot
        send the incremental as a backup
        delete the first snapshot
        (use the second snapshot for the next incremental)

  The receive portion is all in userland, and in the 'next' branch of
  my btrfs-progs repo.

  There's still some work to do in terms of optimizing the send side
  from kernel to userland.  The really important part is figuring out
  how two snapshots are different, and this is where we are
  concentrating right now.  The initial send of a dataset is a little
  slower than tar, but the incremental sends are dramatically faster
  than what rsync can do.

  On top of all of that, we have a nice queue of fixes, cleanups and
  optimizations."

Fix up trivial modify/del conflict in fs/btrfs/ioctl.c

Also fix up semantic conflict in fs/btrfs/send.c: the interface to
dentry_open() changed in commit 765927b2d508 ("switch dentry_open() to
struct path, make it grab references itself"), and since it now grabs
whatever references it needs, we should no longer do the mntget() on
the mnt (and we need to dput() the dentry reference we took).

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (65 commits)
  Btrfs: uninit variable fixes in send/receive
  Btrfs: introduce BTRFS_IOC_SEND for btrfs send/receive
  Btrfs: add btrfs_compare_trees function
  Btrfs: introduce subvol uuids and times
  Btrfs: make iref_to_path non static
  Btrfs: add a barrier before a waitqueue_active check
  Btrfs: call the ordered free operation without any locks held
  Btrfs: Check INCOMPAT flags on remount and add helper function
  Btrfs: add helper for tree enumeration
  btrfs: allow cross-subvolume file clone
  Btrfs: improve multi-thread buffer read
  Btrfs: make btrfs's allocation smoothly with preallocation
  Btrfs: lock the transition from dirty to writeback for an eb
  Btrfs: fix potential race in extent buffer freeing
  Btrfs: don't return true in releasepage unless we actually freed the eb
  Btrfs: suppress printk() if all device I/O stats are zero
  Btrfs: remove unwanted printk() for btrfs device I/O stats
  Btrfs: rewrite BTRFS_SETGET_FUNCS
  Btrfs: zero unused bytes in inode item
  Btrfs: kill free_space pointer from inode structure
  ...

Conflicts:
	fs/btrfs/ioctl.c
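[Editor's note] The pull message above describes the send/receive backup cycle and the new subvolume quotas in prose. Below is a minimal sketch of that userland workflow; the btrfs-progs command names and flags shown are assumptions (the receive side lived only in the 'next' branch of btrfs-progs at the time of this merge), so read it as an illustration of the model rather than the documented interface.

    # Full backup: snapshots must be read-only to be sent.
    btrfs subvolume snapshot -r /mnt/data /mnt/data/snap1
    btrfs send /mnt/data/snap1 | btrfs receive /backup

    # ... make changes to /mnt/data ...

    # Incremental backup: send only the difference against the first snapshot.
    btrfs subvolume snapshot -r /mnt/data /mnt/data/snap2
    btrfs send -p /mnt/data/snap1 /mnt/data/snap2 | btrfs receive /backup

    # Drop the first snapshot; snap2 becomes the parent of the next incremental.
    btrfs subvolume delete /mnt/data/snap1

    # Subvolume quotas (qgroups): enable tracking, then cap one subvolume.
    btrfs quota enable /mnt/data
    btrfs qgroup limit 10G /mnt/data/subvol_user1

An incremental stream produced this way carries only the metadata and data that differ between snap1 and snap2, which is why the message compares repeated backups favourably with rsync.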
-rw-r--r--	fs/btrfs/Makefile	2
-rw-r--r--	fs/btrfs/async-thread.c	9
-rw-r--r--	fs/btrfs/backref.c	40
-rw-r--r--	fs/btrfs/backref.h	7
-rw-r--r--	fs/btrfs/btrfs_inode.h	14
-rw-r--r--	fs/btrfs/check-integrity.c	7
-rw-r--r--	fs/btrfs/ctree.c	775
-rw-r--r--	fs/btrfs/ctree.h	368
-rw-r--r--	fs/btrfs/delayed-inode.c	23
-rw-r--r--	fs/btrfs/delayed-inode.h	2
-rw-r--r--	fs/btrfs/delayed-ref.c	56
-rw-r--r--	fs/btrfs/delayed-ref.h	62
-rw-r--r--	fs/btrfs/disk-io.c	150
-rw-r--r--	fs/btrfs/disk-io.h	6
-rw-r--r--	fs/btrfs/extent-tree.c	358
-rw-r--r--	fs/btrfs/extent_io.c	58
-rw-r--r--	fs/btrfs/file-item.c	4
-rw-r--r--	fs/btrfs/free-space-cache.c	2
-rw-r--r--	fs/btrfs/inode.c	42
-rw-r--r--	fs/btrfs/ioctl.c	467
-rw-r--r--	fs/btrfs/ioctl.h	97
-rw-r--r--	fs/btrfs/locking.c	14
-rw-r--r--	fs/btrfs/qgroup.c	1571
-rw-r--r--	fs/btrfs/relocation.c	3
-rw-r--r--	fs/btrfs/root-tree.c	107
-rw-r--r--	fs/btrfs/send.c	4571
-rw-r--r--	fs/btrfs/send.h	133
-rw-r--r--	fs/btrfs/struct-funcs.c	196
-rw-r--r--	fs/btrfs/super.c	28
-rw-r--r--	fs/btrfs/transaction.c	101
-rw-r--r--	fs/btrfs/transaction.h	12
-rw-r--r--	fs/btrfs/tree-log.c	4
-rw-r--r--	fs/btrfs/volumes.c	25
-rw-r--r--	fs/btrfs/volumes.h	4
-rw-r--r--	fs/inode.c	2
35 files changed, 8689 insertions, 631 deletions
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 0c4fa2befae..d7fcdba141a 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
- reada.o backref.o ulist.o
+ reada.o backref.o ulist.o qgroup.o send.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 42704149b72..58b7d14b08e 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -206,10 +206,17 @@ static noinline void run_ordered_completions(struct btrfs_workers *workers,
work->ordered_func(work);
- /* now take the lock again and call the freeing code */
+ /* now take the lock again and drop our item from the list */
spin_lock(&workers->order_lock);
list_del(&work->order_list);
+ spin_unlock(&workers->order_lock);
+
+ /*
+ * we don't want to call the ordered free functions
+ * with the lock held though
+ */
work->ordered_free(work);
+ spin_lock(&workers->order_lock);
}
spin_unlock(&workers->order_lock);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index a383c18e74e..a256f3b2a84 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -773,9 +773,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
*/
static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 delayed_ref_seq, u64 time_seq,
- struct ulist *refs, struct ulist *roots,
- const u64 *extent_item_pos)
+ u64 time_seq, struct ulist *refs,
+ struct ulist *roots, const u64 *extent_item_pos)
{
struct btrfs_key key;
struct btrfs_path *path;
@@ -837,7 +836,7 @@ again:
btrfs_put_delayed_ref(&head->node);
goto again;
}
- ret = __add_delayed_refs(head, delayed_ref_seq,
+ ret = __add_delayed_refs(head, time_seq,
&prefs_delayed);
mutex_unlock(&head->mutex);
if (ret) {
@@ -981,8 +980,7 @@ static void free_leaf_list(struct ulist *blocks)
*/
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 delayed_ref_seq, u64 time_seq,
- struct ulist **leafs,
+ u64 time_seq, struct ulist **leafs,
const u64 *extent_item_pos)
{
struct ulist *tmp;
@@ -997,7 +995,7 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
return -ENOMEM;
}
- ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+ ret = find_parent_nodes(trans, fs_info, bytenr,
time_seq, *leafs, tmp, extent_item_pos);
ulist_free(tmp);
@@ -1024,8 +1022,7 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
*/
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 delayed_ref_seq, u64 time_seq,
- struct ulist **roots)
+ u64 time_seq, struct ulist **roots)
{
struct ulist *tmp;
struct ulist_node *node = NULL;
@@ -1043,7 +1040,7 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
ULIST_ITER_INIT(&uiter);
while (1) {
- ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+ ret = find_parent_nodes(trans, fs_info, bytenr,
time_seq, tmp, *roots, NULL);
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
@@ -1125,10 +1122,10 @@ static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
* required for the path to fit into the buffer. in that case, the returned
* value will be smaller than dest. callers must check this!
*/
-static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
- struct btrfs_inode_ref *iref,
- struct extent_buffer *eb_in, u64 parent,
- char *dest, u32 size)
+char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ struct btrfs_inode_ref *iref,
+ struct extent_buffer *eb_in, u64 parent,
+ char *dest, u32 size)
{
u32 len;
int slot;
@@ -1376,11 +1373,9 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
struct ulist *roots = NULL;
struct ulist_node *ref_node = NULL;
struct ulist_node *root_node = NULL;
- struct seq_list seq_elem = {};
struct seq_list tree_mod_seq_elem = {};
struct ulist_iterator ref_uiter;
struct ulist_iterator root_uiter;
- struct btrfs_delayed_ref_root *delayed_refs = NULL;
pr_debug("resolving all inodes for extent %llu\n",
extent_item_objectid);
@@ -1391,16 +1386,11 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
trans = btrfs_join_transaction(fs_info->extent_root);
if (IS_ERR(trans))
return PTR_ERR(trans);
-
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
- btrfs_get_delayed_seq(delayed_refs, &seq_elem);
- spin_unlock(&delayed_refs->lock);
btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
}
ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
- seq_elem.seq, tree_mod_seq_elem.seq, &refs,
+ tree_mod_seq_elem.seq, &refs,
&extent_item_pos);
if (ret)
goto out;
@@ -1408,8 +1398,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
ULIST_ITER_INIT(&ref_uiter);
while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
- seq_elem.seq,
- tree_mod_seq_elem.seq, &roots);
+ tree_mod_seq_elem.seq, &roots);
if (ret)
break;
ULIST_ITER_INIT(&root_uiter);
@@ -1431,7 +1420,6 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
out:
if (!search_commit_root) {
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
- btrfs_put_delayed_seq(delayed_refs, &seq_elem);
btrfs_end_transaction(trans, fs_info->extent_root);
}
@@ -1543,7 +1531,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
ipath->fspath->bytes_left - s_ptr : 0;
fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
- fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
+ fspath = btrfs_iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
inum, fspath_min, bytes_left);
if (IS_ERR(fspath))
return PTR_ERR(fspath);
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index c18d8ac7b79..032f4dc7eab 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -21,6 +21,7 @@
#include "ioctl.h"
#include "ulist.h"
+#include "extent_io.h"
#define BTRFS_BACKREF_SEARCH_COMMIT_ROOT ((struct btrfs_trans_handle *)0)
@@ -58,8 +59,10 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 delayed_ref_seq, u64 time_seq,
- struct ulist **roots);
+ u64 time_seq, struct ulist **roots);
+char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ struct btrfs_inode_ref *iref, struct extent_buffer *eb,
+ u64 parent, char *dest, u32 size);
struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 12394a90d60..5b2ad6bc4fe 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -87,9 +87,6 @@ struct btrfs_inode {
/* node for the red-black tree that links inodes in subvolume root */
struct rb_node rb_node;
- /* the space_info for where this inode's data allocations are done */
- struct btrfs_space_info *space_info;
-
unsigned long runtime_flags;
/* full 64 bit generation number, struct vfs_inode doesn't have a big
@@ -191,11 +188,14 @@ static inline void btrfs_i_size_write(struct inode *inode, u64 size)
BTRFS_I(inode)->disk_i_size = size;
}
-static inline bool btrfs_is_free_space_inode(struct btrfs_root *root,
- struct inode *inode)
+static inline bool btrfs_is_free_space_inode(struct inode *inode)
{
- if (root == root->fs_info->tree_root ||
- BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ if (root == root->fs_info->tree_root &&
+ btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
+ return true;
+ if (BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
return true;
return false;
}
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index da6e9364a5e..9197e2e3340 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1032,6 +1032,7 @@ continue_with_current_leaf_stack_frame:
struct btrfs_disk_key *disk_key;
u8 type;
u32 item_offset;
+ u32 item_size;
if (disk_item_offset + sizeof(struct btrfs_item) >
sf->block_ctx->len) {
@@ -1047,6 +1048,7 @@ leaf_item_out_of_bounce_error:
disk_item_offset,
sizeof(struct btrfs_item));
item_offset = le32_to_cpu(disk_item.offset);
+ item_size = le32_to_cpu(disk_item.size);
disk_key = &disk_item.key;
type = disk_key->type;
@@ -1057,14 +1059,13 @@ leaf_item_out_of_bounce_error:
root_item_offset = item_offset +
offsetof(struct btrfs_leaf, items);
- if (root_item_offset +
- sizeof(struct btrfs_root_item) >
+ if (root_item_offset + item_size >
sf->block_ctx->len)
goto leaf_item_out_of_bounce_error;
btrfsic_read_from_block_data(
sf->block_ctx, &root_item,
root_item_offset,
- sizeof(struct btrfs_root_item));
+ item_size);
next_bytenr = le64_to_cpu(root_item.bytenr);
sf->error =
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 8206b390058..9d7621f271f 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -321,7 +321,7 @@ struct tree_mod_root {
struct tree_mod_elem {
struct rb_node node;
u64 index; /* shifted logical */
- struct seq_list elem;
+ u64 seq;
enum mod_log_op op;
/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
@@ -341,20 +341,50 @@ struct tree_mod_elem {
struct tree_mod_root old_root;
};
-static inline void
-__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
+static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
- elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
- list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+ read_lock(&fs_info->tree_mod_log_lock);
}
-void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
- struct seq_list *elem)
+static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
+{
+ read_unlock(&fs_info->tree_mod_log_lock);
+}
+
+static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
+{
+ write_lock(&fs_info->tree_mod_log_lock);
+}
+
+static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
+{
+ write_unlock(&fs_info->tree_mod_log_lock);
+}
+
+/*
+ * This adds a new blocker to the tree mod log's blocker list if the @elem
+ * passed does not already have a sequence number set. So when a caller expects
+ * to record tree modifications, it should ensure to set elem->seq to zero
+ * before calling btrfs_get_tree_mod_seq.
+ * Returns a fresh, unused tree log modification sequence number, even if no new
+ * blocker was added.
+ */
+u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+ struct seq_list *elem)
{
- elem->flags = 1;
+ u64 seq;
+
+ tree_mod_log_write_lock(fs_info);
spin_lock(&fs_info->tree_mod_seq_lock);
- __get_tree_mod_seq(fs_info, elem);
+ if (!elem->seq) {
+ elem->seq = btrfs_inc_tree_mod_seq(fs_info);
+ list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+ }
+ seq = btrfs_inc_tree_mod_seq(fs_info);
spin_unlock(&fs_info->tree_mod_seq_lock);
+ tree_mod_log_write_unlock(fs_info);
+
+ return seq;
}
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
@@ -371,41 +401,46 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
if (!seq_putting)
return;
- BUG_ON(!(elem->flags & 1));
spin_lock(&fs_info->tree_mod_seq_lock);
list_del(&elem->list);
+ elem->seq = 0;
list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
- if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
+ if (cur_elem->seq < min_seq) {
if (seq_putting > cur_elem->seq) {
/*
* blocker with lower sequence number exists, we
* cannot remove anything from the log
*/
- goto out;
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+ return;
}
min_seq = cur_elem->seq;
}
}
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+
+ /*
+ * we removed the lowest blocker from the blocker list, so there may be
+ * more processible delayed refs.
+ */
+ wake_up(&fs_info->tree_mod_seq_wait);
/*
* anything that's lower than the lowest existing (read: blocked)
* sequence number can be removed from the tree.
*/
- write_lock(&fs_info->tree_mod_log_lock);
+ tree_mod_log_write_lock(fs_info);
tm_root = &fs_info->tree_mod_log;
for (node = rb_first(tm_root); node; node = next) {
next = rb_next(node);
tm = container_of(node, struct tree_mod_elem, node);
- if (tm->elem.seq > min_seq)
+ if (tm->seq > min_seq)
continue;
rb_erase(node, tm_root);
- list_del(&tm->elem.list);
kfree(tm);
}
- write_unlock(&fs_info->tree_mod_log_lock);
-out:
- spin_unlock(&fs_info->tree_mod_seq_lock);
+ tree_mod_log_write_unlock(fs_info);
}
/*
@@ -423,11 +458,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
struct rb_node **new;
struct rb_node *parent = NULL;
struct tree_mod_elem *cur;
- int ret = 0;
- BUG_ON(!tm || !tm->elem.seq);
+ BUG_ON(!tm || !tm->seq);
- write_lock(&fs_info->tree_mod_log_lock);
tm_root = &fs_info->tree_mod_log;
new = &tm_root->rb_node;
while (*new) {
@@ -437,88 +470,81 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
new = &((*new)->rb_left);
else if (cur->index > tm->index)
new = &((*new)->rb_right);
- else if (cur->elem.seq < tm->elem.seq)
+ else if (cur->seq < tm->seq)
new = &((*new)->rb_left);
- else if (cur->elem.seq > tm->elem.seq)
+ else if (cur->seq > tm->seq)
new = &((*new)->rb_right);
else {
kfree(tm);
- ret = -EEXIST;
- goto unlock;
+ return -EEXIST;
}
}
rb_link_node(&tm->node, parent, new);
rb_insert_color(&tm->node, tm_root);
-unlock:
- write_unlock(&fs_info->tree_mod_log_lock);
- return ret;
+ return 0;
}
+/*
+ * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
+ * returns zero with the tree_mod_log_lock acquired. The caller must hold
+ * this until all tree mod log insertions are recorded in the rb tree and then
+ * call tree_mod_log_write_unlock() to release.
+ */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb) {
smp_mb();
if (list_empty(&(fs_info)->tree_mod_seq_list))
return 1;
- if (!eb)
- return 0;
- if (btrfs_header_level(eb) == 0)
+ if (eb && btrfs_header_level(eb) == 0)
+ return 1;
+
+ tree_mod_log_write_lock(fs_info);
+ if (list_empty(&fs_info->tree_mod_seq_list)) {
+ /*
+ * someone emptied the list while we were waiting for the lock.
+ * we must not add to the list when no blocker exists.
+ */
+ tree_mod_log_write_unlock(fs_info);
return 1;
+ }
+
return 0;
}
/*
- * This allocates memory and gets a tree modification sequence number when
- * needed.
+ * This allocates memory and gets a tree modification sequence number.
*
- * Returns 0 when no sequence number is needed, < 0 on error.
- * Returns 1 when a sequence number was added. In this case,
- * fs_info->tree_mod_seq_lock was acquired and must be released by the caller
- * after inserting into the rb tree.
+ * Returns <0 on error.
+ * Returns >0 (the added sequence number) on success.
*/
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
struct tree_mod_elem **tm_ret)
{
struct tree_mod_elem *tm;
- int seq;
- if (tree_mod_dont_log(fs_info, NULL))
- return 0;
-
- tm = *tm_ret = kzalloc(sizeof(*tm), flags);
+ /*
+ * once we switch from spin locks to something different, we should
+ * honor the flags parameter here.
+ */
+ tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
if (!tm)
return -ENOMEM;
- tm->elem.flags = 0;
- spin_lock(&fs_info->tree_mod_seq_lock);
- if (list_empty(&fs_info->tree_mod_seq_list)) {
- /*
- * someone emptied the list while we were waiting for the lock.
- * we must not add to the list, because no blocker exists. items
- * are removed from the list only when the existing blocker is
- * removed from the list.
- */
- kfree(tm);
- seq = 0;
- spin_unlock(&fs_info->tree_mod_seq_lock);
- } else {
- __get_tree_mod_seq(fs_info, &tm->elem);
- seq = tm->elem.seq;
- }
-
- return seq;
+ tm->seq = btrfs_inc_tree_mod_seq(fs_info);
+ return tm->seq;
}
-static noinline int
-tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb, int slot,
- enum mod_log_op op, gfp_t flags)
+static inline int
+__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int slot,
+ enum mod_log_op op, gfp_t flags)
{
- struct tree_mod_elem *tm;
int ret;
+ struct tree_mod_elem *tm;
ret = tree_mod_alloc(fs_info, flags, &tm);
- if (ret <= 0)
+ if (ret < 0)
return ret;
tm->index = eb->start >> PAGE_CACHE_SHIFT;
@@ -530,8 +556,22 @@ tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
tm->slot = slot;
tm->generation = btrfs_node_ptr_generation(eb, slot);
- ret = __tree_mod_log_insert(fs_info, tm);
- spin_unlock(&fs_info->tree_mod_seq_lock);
+ return __tree_mod_log_insert(fs_info, tm);
+}
+
+static noinline int
+tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int slot,
+ enum mod_log_op op, gfp_t flags)
+{
+ int ret;
+
+ if (tree_mod_dont_log(fs_info, eb))
+ return 0;
+
+ ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
+
+ tree_mod_log_write_unlock(fs_info);
return ret;
}
@@ -543,6 +583,14 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
}
static noinline int
+tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int slot,
+ enum mod_log_op op)
+{
+ return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
+}
+
+static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int dst_slot, int src_slot,
int nr_items, gfp_t flags)
@@ -555,14 +603,14 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
return 0;
for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
- ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
+ ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
MOD_LOG_KEY_REMOVE_WHILE_MOVING);
BUG_ON(ret < 0);
}
ret = tree_mod_alloc(fs_info, flags, &tm);
- if (ret <= 0)
- return ret;
+ if (ret < 0)
+ goto out;
tm->index = eb->start >> PAGE_CACHE_SHIFT;
tm->slot = src_slot;
@@ -571,10 +619,26 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
tm->op = MOD_LOG_MOVE_KEYS;
ret = __tree_mod_log_insert(fs_info, tm);
- spin_unlock(&fs_info->tree_mod_seq_lock);
+out:
+ tree_mod_log_write_unlock(fs_info);
return ret;
}
+static inline void
+__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
+{
+ int i;
+ u32 nritems;
+ int ret;
+
+ nritems = btrfs_header_nritems(eb);
+ for (i = nritems - 1; i >= 0; i--) {
+ ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
+ MOD_LOG_KEY_REMOVE_WHILE_FREEING);
+ BUG_ON(ret < 0);
+ }
+}
+
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
struct extent_buffer *old_root,
@@ -583,9 +647,14 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
struct tree_mod_elem *tm;
int ret;
+ if (tree_mod_dont_log(fs_info, NULL))
+ return 0;
+
+ __tree_mod_log_free_eb(fs_info, old_root);
+
ret = tree_mod_alloc(fs_info, flags, &tm);
- if (ret <= 0)
- return ret;
+ if (ret < 0)
+ goto out;
tm->index = new_root->start >> PAGE_CACHE_SHIFT;
tm->old_root.logical = old_root->start;
@@ -594,7 +663,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
tm->op = MOD_LOG_ROOT_REPLACE;
ret = __tree_mod_log_insert(fs_info, tm);
- spin_unlock(&fs_info->tree_mod_seq_lock);
+out:
+ tree_mod_log_write_unlock(fs_info);
return ret;
}
@@ -608,7 +678,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
struct tree_mod_elem *found = NULL;
u64 index = start >> PAGE_CACHE_SHIFT;
- read_lock(&fs_info->tree_mod_log_lock);
+ tree_mod_log_read_lock(fs_info);
tm_root = &fs_info->tree_mod_log;
node = tm_root->rb_node;
while (node) {
@@ -617,18 +687,18 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
node = node->rb_left;
} else if (cur->index > index) {
node = node->rb_right;
- } else if (cur->elem.seq < min_seq) {
+ } else if (cur->seq < min_seq) {
node = node->rb_left;
} else if (!smallest) {
/* we want the node with the highest seq */
if (found)
- BUG_ON(found->elem.seq > cur->elem.seq);
+ BUG_ON(found->seq > cur->seq);
found = cur;
node = node->rb_left;
- } else if (cur->elem.seq > min_seq) {
+ } else if (cur->seq > min_seq) {
/* we want the node with the smallest seq */
if (found)
- BUG_ON(found->elem.seq < cur->elem.seq);
+ BUG_ON(found->seq < cur->seq);
found = cur;
node = node->rb_right;
} else {
@@ -636,7 +706,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
break;
}
}
- read_unlock(&fs_info->tree_mod_log_lock);
+ tree_mod_log_read_unlock(fs_info);
return found;
}
@@ -664,7 +734,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
-static inline void
+static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
struct extent_buffer *src, unsigned long dst_offset,
unsigned long src_offset, int nr_items)
@@ -675,18 +745,23 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
if (tree_mod_dont_log(fs_info, NULL))
return;
- if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
+ if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
+ tree_mod_log_write_unlock(fs_info);
return;
+ }
- /* speed this up by single seq for all operations? */
for (i = 0; i < nr_items; i++) {
- ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
- MOD_LOG_KEY_REMOVE);
+ ret = tree_mod_log_insert_key_locked(fs_info, src,
+ i + src_offset,
+ MOD_LOG_KEY_REMOVE);
BUG_ON(ret < 0);
- ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
- MOD_LOG_KEY_ADD);
+ ret = tree_mod_log_insert_key_locked(fs_info, dst,
+ i + dst_offset,
+ MOD_LOG_KEY_ADD);
BUG_ON(ret < 0);
}
+
+ tree_mod_log_write_unlock(fs_info);
}
static inline void
@@ -699,7 +774,7 @@ tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
BUG_ON(ret < 0);
}
-static inline void
+static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int slot, int atomic)
@@ -712,30 +787,22 @@ tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
BUG_ON(ret < 0);
}
-static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb)
+static noinline void
+tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
- int i;
- int ret;
- u32 nritems;
-
if (tree_mod_dont_log(fs_info, eb))
return;
- nritems = btrfs_header_nritems(eb);
- for (i = nritems - 1; i >= 0; i--) {
- ret = tree_mod_log_insert_key(fs_info, eb, i,
- MOD_LOG_KEY_REMOVE_WHILE_FREEING);
- BUG_ON(ret < 0);
- }
+ __tree_mod_log_free_eb(fs_info, eb);
+
+ tree_mod_log_write_unlock(fs_info);
}
-static inline void
+static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
struct extent_buffer *new_root_node)
{
int ret;
- tree_mod_log_free_eb(root->fs_info, root->node);
ret = tree_mod_log_insert_root(root->fs_info, root->node,
new_root_node, GFP_NOFS);
BUG_ON(ret < 0);
@@ -1069,7 +1136,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
unsigned long p_size = sizeof(struct btrfs_key_ptr);
n = btrfs_header_nritems(eb);
- while (tm && tm->elem.seq >= time_seq) {
+ while (tm && tm->seq >= time_seq) {
/*
* all the operations are recorded with the operator used for
* the modification. as we're going backwards, we do the
@@ -2722,6 +2789,80 @@ done:
}
/*
+ * helper to use instead of search slot if no exact match is needed but
+ * instead the next or previous item should be returned.
+ * When find_higher is true, the next higher item is returned, the next lower
+ * otherwise.
+ * When return_any and find_higher are both true, and no higher item is found,
+ * return the next lower instead.
+ * When return_any is true and find_higher is false, and no lower item is found,
+ * return the next higher instead.
+ * It returns 0 if any item is found, 1 if none is found (tree empty), and
+ * < 0 on error
+ */
+int btrfs_search_slot_for_read(struct btrfs_root *root,
+ struct btrfs_key *key, struct btrfs_path *p,
+ int find_higher, int return_any)
+{
+ int ret;
+ struct extent_buffer *leaf;
+
+again:
+ ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
+ if (ret <= 0)
+ return ret;
+ /*
+ * a return value of 1 means the path is at the position where the
+ * item should be inserted. Normally this is the next bigger item,
+ * but in case the previous item is the last in a leaf, path points
+ * to the first free slot in the previous leaf, i.e. at an invalid
+ * item.
+ */
+ leaf = p->nodes[0];
+
+ if (find_higher) {
+ if (p->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, p);
+ if (ret <= 0)
+ return ret;
+ if (!return_any)
+ return 1;
+ /*
+ * no higher item found, return the next
+ * lower instead
+ */
+ return_any = 0;
+ find_higher = 0;
+ btrfs_release_path(p);
+ goto again;
+ }
+ } else {
+ if (p->slots[0] == 0) {
+ ret = btrfs_prev_leaf(root, p);
+ if (ret < 0)
+ return ret;
+ if (!ret) {
+ p->slots[0] = btrfs_header_nritems(leaf) - 1;
+ return 0;
+ }
+ if (!return_any)
+ return 1;
+ /*
+ * no lower item found, return the next
+ * higher instead
+ */
+ return_any = 0;
+ find_higher = 1;
+ btrfs_release_path(p);
+ goto again;
+ } else {
+ --p->slots[0];
+ }
+ }
+ return 0;
+}
+
+/*
* adjust the pointers going up the tree, starting at level
* making sure the right key of each node is points to 'key'.
* This is used after shifting pointers to the left, so it stops
@@ -4931,6 +5072,431 @@ out:
return ret;
}
+static void tree_move_down(struct btrfs_root *root,
+ struct btrfs_path *path,
+ int *level, int root_level)
+{
+ path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
+ path->slots[*level]);
+ path->slots[*level - 1] = 0;
+ (*level)--;
+}
+
+static int tree_move_next_or_upnext(struct btrfs_root *root,
+ struct btrfs_path *path,
+ int *level, int root_level)
+{
+ int ret = 0;
+ int nritems;
+ nritems = btrfs_header_nritems(path->nodes[*level]);
+
+ path->slots[*level]++;
+
+ while (path->slots[*level] == nritems) {
+ if (*level == root_level)
+ return -1;
+
+ /* move upnext */
+ path->slots[*level] = 0;
+ free_extent_buffer(path->nodes[*level]);
+ path->nodes[*level] = NULL;
+ (*level)++;
+ path->slots[*level]++;
+
+ nritems = btrfs_header_nritems(path->nodes[*level]);
+ ret = 1;
+ }
+ return ret;
+}
+
+/*
+ * Returns 1 if it had to move up and next. 0 is returned if it moved only next
+ * or down.
+ */
+static int tree_advance(struct btrfs_root *root,
+ struct btrfs_path *path,
+ int *level, int root_level,
+ int allow_down,
+ struct btrfs_key *key)
+{
+ int ret;
+
+ if (*level == 0 || !allow_down) {
+ ret = tree_move_next_or_upnext(root, path, level, root_level);
+ } else {
+ tree_move_down(root, path, level, root_level);
+ ret = 0;
+ }
+ if (ret >= 0) {
+ if (*level == 0)
+ btrfs_item_key_to_cpu(path->nodes[*level], key,
+ path->slots[*level]);
+ else
+ btrfs_node_key_to_cpu(path->nodes[*level], key,
+ path->slots[*level]);
+ }
+ return ret;
+}
+
+static int tree_compare_item(struct btrfs_root *left_root,
+ struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ char *tmp_buf)
+{
+ int cmp;
+ int len1, len2;
+ unsigned long off1, off2;
+
+ len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
+ len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
+ if (len1 != len2)
+ return 1;
+
+ off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
+ off2 = btrfs_item_ptr_offset(right_path->nodes[0],
+ right_path->slots[0]);
+
+ read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
+
+ cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
+ if (cmp)
+ return 1;
+ return 0;
+}
+
+#define ADVANCE 1
+#define ADVANCE_ONLY_NEXT -1
+
+/*
+ * This function compares two trees and calls the provided callback for
+ * every changed/new/deleted item it finds.
+ * If shared tree blocks are encountered, whole subtrees are skipped, making
+ * the compare pretty fast on snapshotted subvolumes.
+ *
+ * This currently works on commit roots only. As commit roots are read only,
+ * we don't do any locking. The commit roots are protected with transactions.
+ * Transactions are ended and rejoined when a commit is tried in between.
+ *
+ * This function checks for modifications done to the trees while comparing.
+ * If it detects a change, it aborts immediately.
+ */
+int btrfs_compare_trees(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ btrfs_changed_cb_t changed_cb, void *ctx)
+{
+ int ret;
+ int cmp;
+ struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_path *left_path = NULL;
+ struct btrfs_path *right_path = NULL;
+ struct btrfs_key left_key;
+ struct btrfs_key right_key;
+ char *tmp_buf = NULL;
+ int left_root_level;
+ int right_root_level;
+ int left_level;
+ int right_level;
+ int left_end_reached;
+ int right_end_reached;
+ int advance_left;
+ int advance_right;
+ u64 left_blockptr;
+ u64 right_blockptr;
+ u64 left_start_ctransid;
+ u64 right_start_ctransid;
+ u64 ctransid;
+
+ left_path = btrfs_alloc_path();
+ if (!left_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ right_path = btrfs_alloc_path();
+ if (!right_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ left_path->search_commit_root = 1;
+ left_path->skip_locking = 1;
+ right_path->search_commit_root = 1;
+ right_path->skip_locking = 1;
+
+ spin_lock(&left_root->root_times_lock);
+ left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
+ spin_unlock(&left_root->root_times_lock);
+
+ spin_lock(&right_root->root_times_lock);
+ right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
+ spin_unlock(&right_root->root_times_lock);
+
+ trans = btrfs_join_transaction(left_root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
+ /*
+ * Strategy: Go to the first items of both trees. Then do
+ *
+ * If both trees are at level 0
+ * Compare keys of current items
+ * If left < right treat left item as new, advance left tree
+ * and repeat
+ * If left > right treat right item as deleted, advance right tree
+ * and repeat
+ * If left == right do deep compare of items, treat as changed if
+ * needed, advance both trees and repeat
+ * If both trees are at the same level but not at level 0
+ * Compare keys of current nodes/leafs
+ * If left < right advance left tree and repeat
+ * If left > right advance right tree and repeat
+ * If left == right compare blockptrs of the next nodes/leafs
+ * If they match advance both trees but stay at the same level
+ * and repeat
+ * If they don't match advance both trees while allowing to go
+ * deeper and repeat
+ * If tree levels are different
+ * Advance the tree that needs it and repeat
+ *
+ * Advancing a tree means:
+ * If we are at level 0, try to go to the next slot. If that's not
+ * possible, go one level up and repeat. Stop when we found a level
+ * where we could go to the next slot. We may at this point be on a
+ * node or a leaf.
+ *
+ * If we are not at level 0 and not on shared tree blocks, go one
+ * level deeper.
+ *
+ * If we are not at level 0 and on shared tree blocks, go one slot to
+ * the right if possible or go up and right.
+ */
+
+ left_level = btrfs_header_level(left_root->commit_root);
+ left_root_level = left_level;
+ left_path->nodes[left_level] = left_root->commit_root;
+ extent_buffer_get(left_path->nodes[left_level]);
+
+ right_level = btrfs_header_level(right_root->commit_root);
+ right_root_level = right_level;
+ right_path->nodes[right_level] = right_root->commit_root;
+ extent_buffer_get(right_path->nodes[right_level]);
+
+ if (left_level == 0)
+ btrfs_item_key_to_cpu(left_path->nodes[left_level],
+ &left_key, left_path->slots[left_level]);
+ else
+ btrfs_node_key_to_cpu(left_path->nodes[left_level],
+ &left_key, left_path->slots[left_level]);
+ if (right_level == 0)
+ btrfs_item_key_to_cpu(right_path->nodes[right_level],
+ &right_key, right_path->slots[right_level]);
+ else
+ btrfs_node_key_to_cpu(right_path->nodes[right_level],
+ &right_key, right_path->slots[right_level]);
+
+ left_end_reached = right_end_reached = 0;
+ advance_left = advance_right = 0;
+
+ while (1) {
+ /*
+ * We need to make sure the transaction does not get committed
+ * while we do anything on commit roots. This means, we need to
+ * join and leave transactions for every item that we process.
+ */
+ if (trans && btrfs_should_end_transaction(trans, left_root)) {
+ btrfs_release_path(left_path);
+ btrfs_release_path(right_path);
+
+ ret = btrfs_end_transaction(trans, left_root);
+ trans = NULL;
+ if (ret < 0)
+ goto out;
+ }
+ /* now rejoin the transaction */
+ if (!trans) {
+ trans = btrfs_join_transaction(left_root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
+ spin_lock(&left_root->root_times_lock);
+ ctransid = btrfs_root_ctransid(&left_root->root_item);
+ spin_unlock(&left_root->root_times_lock);
+ if (ctransid != left_start_ctransid)
+ left_start_ctransid = 0;
+
+ spin_lock(&right_root->root_times_lock);
+ ctransid = btrfs_root_ctransid(&right_root->root_item);
+ spin_unlock(&right_root->root_times_lock);
+ if (ctransid != right_start_ctransid)
+ right_start_ctransid = 0;
+
+ if (!left_start_ctransid || !right_start_ctransid) {
+ WARN(1, KERN_WARNING
+ "btrfs: btrfs_compare_tree detected "
+ "a change in one of the trees while "
+ "iterating. This is probably a "
+ "bug.\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * the commit root may have changed, so start again
+ * where we stopped
+ */
+ left_path->lowest_level = left_level;
+ right_path->lowest_level = right_level;
+ ret = btrfs_search_slot(NULL, left_root,
+ &left_key, left_path, 0, 0);
+ if (ret < 0)
+ goto out;
+ ret = btrfs_search_slot(NULL, right_root,
+ &right_key, right_path, 0, 0);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (advance_left && !left_end_reached) {
+ ret = tree_advance(left_root, left_path, &left_level,
+ left_root_level,
+ advance_left != ADVANCE_ONLY_NEXT,
+ &left_key);
+ if (ret < 0)
+ left_end_reached = ADVANCE;
+ advance_left = 0;
+ }
+ if (advance_right && !right_end_reached) {
+ ret = tree_advance(right_root, right_path, &right_level,
+ right_root_level,
+ advance_right != ADVANCE_ONLY_NEXT,
+ &right_key);
+ if (ret < 0)
+ right_end_reached = ADVANCE;
+ advance_right = 0;
+ }
+
+ if (left_end_reached && right_end_reached) {
+ ret = 0;
+ goto out;
+ } else if (left_end_reached) {
+ if (right_level == 0) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &right_key,
+ BTRFS_COMPARE_TREE_DELETED,
+ ctx);
+ if (ret < 0)
+ goto out;
+ }
+ advance_right = ADVANCE;
+ continue;
+ } else if (right_end_reached) {
+ if (left_level == 0) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &left_key,
+ BTRFS_COMPARE_TREE_NEW,
+ ctx);
+ if (ret < 0)
+ goto out;
+ }
+ advance_left = ADVANCE;
+ continue;
+ }
+
+ if (left_level == 0 && right_level == 0) {
+ cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
+ if (cmp < 0) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &left_key,
+ BTRFS_COMPARE_TREE_NEW,
+ ctx);
+ if (ret < 0)
+ goto out;
+ advance_left = ADVANCE;
+ } else if (cmp > 0) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &right_key,
+ BTRFS_COMPARE_TREE_DELETED,
+ ctx);
+ if (ret < 0)
+ goto out;
+ advance_right = ADVANCE;
+ } else {
+ ret = tree_compare_item(left_root, left_path,
+ right_path, tmp_buf);
+ if (ret) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &left_key,
+ BTRFS_COMPARE_TREE_CHANGED,
+ ctx);
+ if (ret < 0)
+ goto out;
+ }
+ advance_left = ADVANCE;
+ advance_right = ADVANCE;
+ }
+ } else if (left_level == right_level) {
+ cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
+ if (cmp < 0) {
+ advance_left = ADVANCE;
+ } else if (cmp > 0) {
+ advance_right = ADVANCE;
+ } else {
+ left_blockptr = btrfs_node_blockptr(
+ left_path->nodes[left_level],
+ left_path->slots[left_level]);
+ right_blockptr = btrfs_node_blockptr(
+ right_path->nodes[right_level],
+ right_path->slots[right_level]);
+ if (left_blockptr == right_blockptr) {
+ /*
+ * As we're on a shared block, don't
+ * allow to go deeper.
+ */
+ advance_left = ADVANCE_ONLY_NEXT;
+ advance_right = ADVANCE_ONLY_NEXT;
+ } else {
+ advance_left = ADVANCE;
+ advance_right = ADVANCE;
+ }
+ }
+ } else if (left_level < right_level) {
+ advance_right = ADVANCE;
+ } else {
+ advance_left = ADVANCE;
+ }
+ }
+
+out:
+ btrfs_free_path(left_path);
+ btrfs_free_path(right_path);
+ kfree(tmp_buf);
+
+ if (trans) {
+ if (!ret)
+ ret = btrfs_end_transaction(trans, left_root);
+ else
+ btrfs_end_transaction(trans, left_root);
+ }
+
+ return ret;
+}
+
/*
* this is similar to btrfs_next_leaf, but does not try to preserve
* and fixup the path. It looks for and returns the next key in the
@@ -5127,6 +5693,7 @@ again:
* locked. To solve this situation, we give up
* on our lock and cycle.
*/
+ free_extent_buffer(next);
btrfs_release_path(path);
cond_resched();
goto again;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index fa5c45b3907..adb1cd7ceb9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -91,6 +91,9 @@ struct btrfs_ordered_sum;
/* for storing balance parameters in the root tree */
#define BTRFS_BALANCE_OBJECTID -4ULL
+/* holds quota configuration and tracking */
+#define BTRFS_QUOTA_TREE_OBJECTID 8ULL
+
/* orhpan objectid for tracking unlinked/truncated files */
#define BTRFS_ORPHAN_OBJECTID -5ULL
@@ -709,6 +712,36 @@ struct btrfs_root_item {
struct btrfs_disk_key drop_progress;
u8 drop_level;
u8 level;
+
+ /*
+ * The following fields appear after subvol_uuids+subvol_times
+ * were introduced.
+ */
+
+ /*
+ * This generation number is used to test if the new fields are valid
+ * and up to date while reading the root item. Everytime the root item
+ * is written out, the "generation" field is copied into this field. If
+ * anyone ever mounted the fs with an older kernel, we will have
+ * mismatching generation values here and thus must invalidate the
+ * new fields. See btrfs_update_root and btrfs_find_last_root for
+ * details.
+ * the offset of generation_v2 is also used as the start for the memset
+ * when invalidating the fields.
+ */
+ __le64 generation_v2;
+ u8 uuid[BTRFS_UUID_SIZE];
+ u8 parent_uuid[BTRFS_UUID_SIZE];
+ u8 received_uuid[BTRFS_UUID_SIZE];
+ __le64 ctransid; /* updated when an inode changes */
+ __le64 otransid; /* trans when created */
+ __le64 stransid; /* trans when sent. non-zero for received subvol */
+ __le64 rtransid; /* trans when received. non-zero for received subvol */
+ struct btrfs_timespec ctime;
+ struct btrfs_timespec otime;
+ struct btrfs_timespec stime;
+ struct btrfs_timespec rtime;
+ __le64 reserved[8]; /* for future */
} __attribute__ ((__packed__));
/*
@@ -883,6 +916,72 @@ struct btrfs_block_group_item {
__le64 flags;
} __attribute__ ((__packed__));
+/*
+ * is subvolume quota turned on?
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0)
+/*
+ * SCANNING is set during the initialization phase
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_SCANNING (1ULL << 1)
+/*
+ * Some qgroup entries are known to be out of date,
+ * either because the configuration has changed in a way that
+ * makes a rescan necessary, or because the fs has been mounted
+ * with a non-qgroup-aware version.
+ * Turning qouta off and on again makes it inconsistent, too.
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
+
+#define BTRFS_QGROUP_STATUS_VERSION 1
+
+struct btrfs_qgroup_status_item {
+ __le64 version;
+ /*
+ * the generation is updated during every commit. As older
+ * versions of btrfs are not aware of qgroups, it will be
+ * possible to detect inconsistencies by checking the
+ * generation on mount time
+ */
+ __le64 generation;
+
+ /* flag definitions see above */
+ __le64 flags;
+
+ /*
+ * only used during scanning to record the progress
+ * of the scan. It contains a logical address
+ */
+ __le64 scan;
+} __attribute__ ((__packed__));
+
+struct btrfs_qgroup_info_item {
+ __le64 generation;
+ __le64 rfer;
+ __le64 rfer_cmpr;
+ __le64 excl;
+ __le64 excl_cmpr;
+} __attribute__ ((__packed__));
+
+/* flags definition for qgroup limits */
+#define BTRFS_QGROUP_LIMIT_MAX_RFER (1ULL << 0)
+#define BTRFS_QGROUP_LIMIT_MAX_EXCL (1ULL << 1)
+#define BTRFS_QGROUP_LIMIT_RSV_RFER (1ULL << 2)
+#define BTRFS_QGROUP_LIMIT_RSV_EXCL (1ULL << 3)
+#define BTRFS_QGROUP_LIMIT_RFER_CMPR (1ULL << 4)
+#define BTRFS_QGROUP_LIMIT_EXCL_CMPR (1ULL << 5)
+
+struct btrfs_qgroup_limit_item {
+ /*
+ * only updated when any of the other values change
+ */
+ __le64 flags;
+ __le64 max_rfer;
+ __le64 max_excl;
+ __le64 rsv_rfer;
+ __le64 rsv_excl;
+} __attribute__ ((__packed__));
+
struct btrfs_space_info {
u64 flags;
@@ -1030,6 +1129,13 @@ struct btrfs_block_group_cache {
struct list_head cluster_list;
};
+/* delayed seq elem */
+struct seq_list {
+ struct list_head list;
+ u64 seq;
+};
+
+/* fs_info */
struct reloc_control;
struct btrfs_device;
struct btrfs_fs_devices;
@@ -1044,6 +1150,7 @@ struct btrfs_fs_info {
struct btrfs_root *dev_root;
struct btrfs_root *fs_root;
struct btrfs_root *csum_root;
+ struct btrfs_root *quota_root;
/* the log root tree is a directory of all the other log roots */
struct btrfs_root *log_root_tree;
@@ -1144,6 +1251,8 @@ struct btrfs_fs_info {
spinlock_t tree_mod_seq_lock;
atomic_t tree_mod_seq;
struct list_head tree_mod_seq_list;
+ struct seq_list tree_mod_seq_elem;
+ wait_queue_head_t tree_mod_seq_wait;
/* this protects tree_mod_log */
rwlock_t tree_mod_log_lock;
@@ -1240,6 +1349,8 @@ struct btrfs_fs_info {
*/
struct list_head space_info;
+ struct btrfs_space_info *data_sinfo;
+
struct reloc_control *reloc_ctl;
spinlock_t delalloc_lock;
@@ -1296,6 +1407,29 @@ struct btrfs_fs_info {
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
u32 check_integrity_print_mask;
#endif
+ /*
+ * quota information
+ */
+ unsigned int quota_enabled:1;
+
+ /*
+ * quota_enabled only changes state after a commit. This holds the
+ * next state.
+ */
+ unsigned int pending_quota_state:1;
+
+ /* is qgroup tracking in a consistent state? */
+ u64 qgroup_flags;
+
+ /* holds configuration and tracking. Protected by qgroup_lock */
+ struct rb_root qgroup_tree;
+ spinlock_t qgroup_lock;
+
+ /* list of dirty qgroups to be written at next commit */
+ struct list_head dirty_qgroups;
+
+ /* used by btrfs_qgroup_record_ref for an efficient tree traversal */
+ u64 qgroup_seq;
/* filesystem state */
u64 fs_state;
@@ -1416,6 +1550,8 @@ struct btrfs_root {
dev_t anon_dev;
int force_cow;
+
+ spinlock_t root_times_lock;
};
struct btrfs_ioctl_defrag_range_args {
@@ -1525,6 +1661,30 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_DEV_ITEM_KEY 216
#define BTRFS_CHUNK_ITEM_KEY 228
+/*
+ * Records the overall state of the qgroups.
+ * There's only one instance of this key present,
+ * (0, BTRFS_QGROUP_STATUS_KEY, 0)
+ */
+#define BTRFS_QGROUP_STATUS_KEY 240
+/*
+ * Records the currently used space of the qgroup.
+ * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid).
+ */
+#define BTRFS_QGROUP_INFO_KEY 242
+/*
+ * Contains the user configured limits for the qgroup.
+ * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid).
+ */
+#define BTRFS_QGROUP_LIMIT_KEY 244
+/*
+ * Records the child-parent relationship of qgroups. For
+ * each relation, 2 keys are present:
+ * (childid, BTRFS_QGROUP_RELATION_KEY, parentid)
+ * (parentid, BTRFS_QGROUP_RELATION_KEY, childid)
+ */
+#define BTRFS_QGROUP_RELATION_KEY 246
+
#define BTRFS_BALANCE_ITEM_KEY 248
/*
@@ -1621,13 +1781,54 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
offsetof(type, member), \
sizeof(((type *)0)->member)))
-#ifndef BTRFS_SETGET_FUNCS
+#define DECLARE_BTRFS_SETGET_BITS(bits) \
+u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, \
+ struct btrfs_map_token *token); \
+void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, u##bits val, \
+ struct btrfs_map_token *token); \
+static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off) \
+{ \
+ return btrfs_get_token_##bits(eb, ptr, off, NULL); \
+} \
+static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, u##bits val) \
+{ \
+ btrfs_set_token_##bits(eb, ptr, off, val, NULL); \
+}
+
+DECLARE_BTRFS_SETGET_BITS(8)
+DECLARE_BTRFS_SETGET_BITS(16)
+DECLARE_BTRFS_SETGET_BITS(32)
+DECLARE_BTRFS_SETGET_BITS(64)
+
#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
-u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
-u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, struct btrfs_map_token *token); \
-void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token);\
-void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
-#endif
+static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \
+{ \
+ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
+ return btrfs_get_##bits(eb, s, offsetof(type, member)); \
+} \
+static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
+ u##bits val) \
+{ \
+ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
+ btrfs_set_##bits(eb, s, offsetof(type, member), val); \
+} \
+static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \
+ struct btrfs_map_token *token) \
+{ \
+ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
+ return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \
+} \
+static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
+ type *s, u##bits val, \
+ struct btrfs_map_token *token) \
+{ \
+ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
+ btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \
+}
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
static inline u##bits btrfs_##name(struct extent_buffer *eb) \
@@ -2189,6 +2390,16 @@ BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64);
BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64);
BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
last_snapshot, 64);
+BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item,
+ generation_v2, 64);
+BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item,
+ ctransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item,
+ otransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item,
+ stransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item,
+ rtransid, 64);
static inline bool btrfs_root_readonly(struct btrfs_root *root)
{
@@ -2465,6 +2676,49 @@ static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb,
sizeof(val));
}
+/* btrfs_qgroup_status_item */
+BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item,
+ generation, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item,
+ version, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
+ flags, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_scan, struct btrfs_qgroup_status_item,
+ scan, 64);
+
+/* btrfs_qgroup_info_item */
+BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
+ generation, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item,
+ rfer_cmpr, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item,
+ excl_cmpr, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation,
+ struct btrfs_qgroup_info_item, generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item,
+ rfer, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr,
+ struct btrfs_qgroup_info_item, rfer_cmpr, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item,
+ excl, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr,
+ struct btrfs_qgroup_info_item, excl_cmpr, 64);
+
+/* btrfs_qgroup_limit_item */
+BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item,
+ flags, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item,
+ max_rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item,
+ max_excl, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item,
+ rsv_rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item,
+ rsv_excl, 64);
+
static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
return sb->s_fs_info;
@@ -2607,7 +2861,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 group_start);
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
-void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
@@ -2661,6 +2914,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
+int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int level, int *slot);
@@ -2680,6 +2935,21 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_key *max_key,
struct btrfs_path *path, int cache_only,
u64 min_trans);
+enum btrfs_compare_tree_result {
+ BTRFS_COMPARE_TREE_NEW,
+ BTRFS_COMPARE_TREE_DELETED,
+ BTRFS_COMPARE_TREE_CHANGED,
+};
+typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ struct btrfs_key *key,
+ enum btrfs_compare_tree_result result,
+ void *ctx);
+int btrfs_compare_trees(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ btrfs_changed_cb_t cb, void *ctx);
int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
@@ -2711,6 +2981,9 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
struct btrfs_path *p, u64 time_seq);
+int btrfs_search_slot_for_read(struct btrfs_root *root,
+ struct btrfs_key *key, struct btrfs_path *p,
+ int find_higher, int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
int start_slot, int cache_only, u64 *last_ret,
@@ -2793,11 +3066,22 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
kfree(fs_info->chunk_root);
kfree(fs_info->dev_root);
kfree(fs_info->csum_root);
+ kfree(fs_info->quota_root);
kfree(fs_info->super_copy);
kfree(fs_info->super_for_commit);
kfree(fs_info);
}
+/* tree mod log functions from ctree.c */
+u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+ struct seq_list *elem);
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+ struct seq_list *elem);
+static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
+{
+ return atomic_inc_return(&fs_info->tree_mod_seq);
+}
+
/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
struct btrfs_path *path,
@@ -2819,6 +3103,9 @@ int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_root_item *item);
+void btrfs_read_root_item(struct btrfs_root *root,
+ struct extent_buffer *eb, int slot,
+ struct btrfs_root_item *item);
int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
@@ -2826,6 +3113,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
void btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
+void btrfs_update_root_times(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
/* dir-item.c */
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
@@ -3061,6 +3350,23 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *function,
unsigned int line, int errno);
+#define btrfs_set_fs_incompat(__fs_info, opt) \
+ __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+
+static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
+ u64 flag)
+{
+ struct btrfs_super_block *disk_super;
+ u64 features;
+
+ disk_super = fs_info->super_copy;
+ features = btrfs_super_incompat_flags(disk_super);
+ if (!(features & flag)) {
+ features |= flag;
+ btrfs_set_super_incompat_flags(disk_super, features);
+ }
+}
+
#define btrfs_abort_transaction(trans, root, errno) \
do { \
__btrfs_abort_transaction(trans, root, __func__, \
@@ -3156,17 +3462,49 @@ void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
u64 start, int err);
-/* delayed seq elem */
-struct seq_list {
+/* qgroup.c */
+struct qgroup_update {
struct list_head list;
- u64 seq;
- u32 flags;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_delayed_extent_op *extent_op;
};
-void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
- struct seq_list *elem);
-void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
- struct seq_list *elem);
+int btrfs_quota_enable(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+int btrfs_quota_disable(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+int btrfs_quota_rescan(struct btrfs_fs_info *fs_info);
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst);
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst);
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid,
+ char *name);
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid);
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid,
+ struct btrfs_qgroup_limit *limit);
+int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
+void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
+struct btrfs_delayed_extent_op;
+int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_extent_op *extent_op);
+int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_extent_op *extent_op);
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
+ struct btrfs_qgroup_inherit *inherit);
+int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes);
+void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes);
+
+void assert_qgroups_uptodate(struct btrfs_trans_handle *trans);
static inline int is_fstree(u64 rootid)
{
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 2399f408691..335605c8cea 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -62,6 +62,7 @@ static inline void btrfs_init_delayed_node(
INIT_LIST_HEAD(&delayed_node->n_list);
INIT_LIST_HEAD(&delayed_node->p_list);
delayed_node->bytes_reserved = 0;
+ memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}
static inline int btrfs_is_continuous_delayed_item(
@@ -1113,8 +1114,8 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
* Returns < 0 on error and returns with an aborted transaction with any
* outstanding delayed items cleaned up.
*/
-int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int nr)
{
struct btrfs_root *curr_root = root;
struct btrfs_delayed_root *delayed_root;
@@ -1122,6 +1123,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret = 0;
+ bool count = (nr > 0);
if (trans->aborted)
return -EIO;
@@ -1137,7 +1139,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
delayed_root = btrfs_get_delayed_root(root);
curr_node = btrfs_first_delayed_node(delayed_root);
- while (curr_node) {
+ while (curr_node && (!count || (count && nr--))) {
curr_root = curr_node->root;
ret = btrfs_insert_delayed_items(trans, path, curr_root,
curr_node);
@@ -1149,6 +1151,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
path, curr_node);
if (ret) {
btrfs_release_delayed_node(curr_node);
+ curr_node = NULL;
btrfs_abort_transaction(trans, root, ret);
break;
}
@@ -1158,12 +1161,26 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
btrfs_release_delayed_node(prev_node);
}
+ if (curr_node)
+ btrfs_release_delayed_node(curr_node);
btrfs_free_path(path);
trans->block_rsv = block_rsv;
return ret;
}
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ return __btrfs_run_delayed_items(trans, root, -1);
+}
+
+int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int nr)
+{
+ return __btrfs_run_delayed_items(trans, root, nr);
+}
+
static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_delayed_node *node)
{
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index f5aa4023d3e..4f808e1baee 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -107,6 +107,8 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode);
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
+int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int nr);
void btrfs_balance_delayed_items(struct btrfs_root *root);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 13ae7b04790..da7419ed01b 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -233,22 +233,26 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
return 0;
}
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
u64 seq)
{
struct seq_list *elem;
-
- assert_spin_locked(&delayed_refs->lock);
- if (list_empty(&delayed_refs->seq_head))
- return 0;
-
- elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
- if (seq >= elem->seq) {
- pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
- seq, elem->seq, delayed_refs);
- return 1;
+ int ret = 0;
+
+ spin_lock(&fs_info->tree_mod_seq_lock);
+ if (!list_empty(&fs_info->tree_mod_seq_list)) {
+ elem = list_first_entry(&fs_info->tree_mod_seq_list,
+ struct seq_list, list);
+ if (seq >= elem->seq) {
+ pr_debug("holding back delayed_ref %llu, lowest is "
+ "%llu (%p)\n", seq, elem->seq, delayed_refs);
+ ret = 1;
+ }
}
- return 0;
+
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+ return ret;
}
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
@@ -525,8 +529,8 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
- if (is_fstree(ref_root))
- seq = inc_delayed_seq(delayed_refs);
+ if (need_ref_seq(for_cow, ref_root))
+ seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
ref->seq = seq;
full_ref = btrfs_delayed_node_to_tree_ref(ref);
@@ -584,8 +588,8 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
- if (is_fstree(ref_root))
- seq = inc_delayed_seq(delayed_refs);
+ if (need_ref_seq(for_cow, ref_root))
+ seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
ref->seq = seq;
full_ref = btrfs_delayed_node_to_data_ref(ref);
@@ -658,10 +662,12 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action,
for_cow);
- if (!is_fstree(ref_root) &&
- waitqueue_active(&delayed_refs->seq_wait))
- wake_up(&delayed_refs->seq_wait);
+ if (!need_ref_seq(for_cow, ref_root) &&
+ waitqueue_active(&fs_info->tree_mod_seq_wait))
+ wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
+ if (need_ref_seq(for_cow, ref_root))
+ btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
return 0;
}
@@ -707,10 +713,12 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, owner, offset,
action, for_cow);
- if (!is_fstree(ref_root) &&
- waitqueue_active(&delayed_refs->seq_wait))
- wake_up(&delayed_refs->seq_wait);
+ if (!need_ref_seq(for_cow, ref_root) &&
+ waitqueue_active(&fs_info->tree_mod_seq_wait))
+ wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
+ if (need_ref_seq(for_cow, ref_root))
+ btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
return 0;
}
@@ -736,8 +744,8 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
extent_op->is_data);
- if (waitqueue_active(&delayed_refs->seq_wait))
- wake_up(&delayed_refs->seq_wait);
+ if (waitqueue_active(&fs_info->tree_mod_seq_wait))
+ wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
return 0;
}
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 413927fb995..0d7c90c366b 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -139,26 +139,6 @@ struct btrfs_delayed_ref_root {
int flushing;
u64 run_delayed_start;
-
- /*
- * seq number of delayed refs. We need to know if a backref was being
- * added before the currently processed ref or afterwards.
- */
- u64 seq;
-
- /*
- * seq_list holds a list of all seq numbers that are currently being
- * added to the list. While walking backrefs (btrfs_find_all_roots,
- * qgroups), which might take some time, no newer ref must be processed,
- * as it might influence the outcome of the walk.
- */
- struct list_head seq_head;
-
- /*
- * when the only refs we have in the list must not be processed, we want
- * to wait for more refs to show up or for the end of backref walking.
- */
- wait_queue_head_t seq_wait;
};
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -195,34 +175,28 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
struct list_head *cluster, u64 search_start);
-static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
-{
- assert_spin_locked(&delayed_refs->lock);
- ++delayed_refs->seq;
- return delayed_refs->seq;
-}
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ u64 seq);
-static inline void
-btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
- struct seq_list *elem)
+/*
+ * delayed refs with a ref_seq > 0 must be held back during backref walking.
+ * this only applies to items in one of the fs-trees. for_cow items never need
+ * to be held back, so they won't get a ref_seq number.
+ */
+static inline int need_ref_seq(int for_cow, u64 rootid)
{
- assert_spin_locked(&delayed_refs->lock);
- elem->seq = delayed_refs->seq;
- list_add_tail(&elem->list, &delayed_refs->seq_head);
-}
+ if (for_cow)
+ return 0;
-static inline void
-btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
- struct seq_list *elem)
-{
- spin_lock(&delayed_refs->lock);
- list_del(&elem->list);
- wake_up(&delayed_refs->seq_wait);
- spin_unlock(&delayed_refs->lock);
-}
+ if (rootid == BTRFS_FS_TREE_OBJECTID)
+ return 1;
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
- u64 seq);
+ if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+ return 1;
+
+ return 0;
+}
/*
* a node might live in a head or a regular ref, this lets you
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 2936ca49b3b..502b20c56e8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -407,7 +407,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
break;
}
- if (failed && !ret)
+ if (failed && !ret && failed_mirror)
repair_eb_io_failure(root, eb, failed_mirror);
return ret;
@@ -1182,6 +1182,8 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->defrag_running = 0;
root->root_key.objectid = objectid;
root->anon_dev = 0;
+
+ spin_lock_init(&root->root_times_lock);
}
static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
@@ -1225,6 +1227,82 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
return root;
}
+struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ u64 objectid)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *root;
+ struct btrfs_key key;
+ int ret = 0;
+ u64 bytenr;
+
+ root = btrfs_alloc_root(fs_info);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+
+ __setup_root(tree_root->nodesize, tree_root->leafsize,
+ tree_root->sectorsize, tree_root->stripesize,
+ root, fs_info, objectid);
+ root->root_key.objectid = objectid;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+
+ leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
+ 0, objectid, NULL, 0, 0, 0);
+ if (IS_ERR(leaf)) {
+ ret = PTR_ERR(leaf);
+ goto fail;
+ }
+
+ bytenr = leaf->start;
+ memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+ btrfs_set_header_bytenr(leaf, leaf->start);
+ btrfs_set_header_generation(leaf, trans->transid);
+ btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
+ btrfs_set_header_owner(leaf, objectid);
+ root->node = leaf;
+
+ write_extent_buffer(leaf, fs_info->fsid,
+ (unsigned long)btrfs_header_fsid(leaf),
+ BTRFS_FSID_SIZE);
+ write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
+ (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
+ BTRFS_UUID_SIZE);
+ btrfs_mark_buffer_dirty(leaf);
+
+ root->commit_root = btrfs_root_node(root);
+ root->track_dirty = 1;
+
+
+ root->root_item.flags = 0;
+ root->root_item.byte_limit = 0;
+ btrfs_set_root_bytenr(&root->root_item, leaf->start);
+ btrfs_set_root_generation(&root->root_item, trans->transid);
+ btrfs_set_root_level(&root->root_item, 0);
+ btrfs_set_root_refs(&root->root_item, 1);
+ btrfs_set_root_used(&root->root_item, leaf->len);
+ btrfs_set_root_last_snapshot(&root->root_item, 0);
+ btrfs_set_root_dirid(&root->root_item, 0);
+ root->root_item.drop_level = 0;
+
+ key.objectid = objectid;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = 0;
+ ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
+ if (ret)
+ goto fail;
+
+ btrfs_tree_unlock(leaf);
+
+fail:
+ if (ret)
+ return ERR_PTR(ret);
+
+ return root;
+}
+
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
@@ -1326,6 +1404,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
u64 generation;
u32 blocksize;
int ret = 0;
+ int slot;
root = btrfs_alloc_root(fs_info);
if (!root)
@@ -1352,9 +1431,8 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
if (ret == 0) {
l = path->nodes[0];
- read_extent_buffer(l, &root->root_item,
- btrfs_item_ptr_offset(l, path->slots[0]),
- sizeof(root->root_item));
+ slot = path->slots[0];
+ btrfs_read_root_item(tree_root, l, slot, &root->root_item);
memcpy(&root->root_key, location, sizeof(*location));
}
btrfs_free_path(path);
@@ -1396,6 +1474,9 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
return fs_info->dev_root;
if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
return fs_info->csum_root;
+ if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
+ return fs_info->quota_root ? fs_info->quota_root :
+ ERR_PTR(-ENOENT);
again:
spin_lock(&fs_info->fs_roots_radix_lock);
root = radix_tree_lookup(&fs_info->fs_roots_radix,
@@ -1823,6 +1904,10 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
free_extent_buffer(info->extent_root->commit_root);
free_extent_buffer(info->csum_root->node);
free_extent_buffer(info->csum_root->commit_root);
+ if (info->quota_root) {
+ free_extent_buffer(info->quota_root->node);
+ free_extent_buffer(info->quota_root->commit_root);
+ }
info->tree_root->node = NULL;
info->tree_root->commit_root = NULL;
@@ -1832,6 +1917,10 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
info->extent_root->commit_root = NULL;
info->csum_root->node = NULL;
info->csum_root->commit_root = NULL;
+ if (info->quota_root) {
+ info->quota_root->node = NULL;
+ info->quota_root->commit_root = NULL;
+ }
if (chunk_root) {
free_extent_buffer(info->chunk_root->node);
@@ -1862,6 +1951,7 @@ int open_ctree(struct super_block *sb,
struct btrfs_root *csum_root;
struct btrfs_root *chunk_root;
struct btrfs_root *dev_root;
+ struct btrfs_root *quota_root;
struct btrfs_root *log_tree_root;
int ret;
int err = -EINVAL;
@@ -1873,9 +1963,10 @@ int open_ctree(struct super_block *sb,
csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
+ quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);
if (!tree_root || !extent_root || !csum_root ||
- !chunk_root || !dev_root) {
+ !chunk_root || !dev_root || !quota_root) {
err = -ENOMEM;
goto fail;
}
@@ -1944,6 +2035,8 @@ int open_ctree(struct super_block *sb,
fs_info->free_chunk_space = 0;
fs_info->tree_mod_log = RB_ROOT;
+ init_waitqueue_head(&fs_info->tree_mod_seq_wait);
+
/* readahead state */
INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
spin_lock_init(&fs_info->reada_lock);
@@ -2032,6 +2125,13 @@ int open_ctree(struct super_block *sb,
init_rwsem(&fs_info->cleanup_work_sem);
init_rwsem(&fs_info->subvol_sem);
+ spin_lock_init(&fs_info->qgroup_lock);
+ fs_info->qgroup_tree = RB_ROOT;
+ INIT_LIST_HEAD(&fs_info->dirty_qgroups);
+ fs_info->qgroup_seq = 1;
+ fs_info->quota_enabled = 0;
+ fs_info->pending_quota_state = 0;
+
btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
@@ -2244,7 +2344,7 @@ int open_ctree(struct super_block *sb,
ret |= btrfs_start_workers(&fs_info->caching_workers);
ret |= btrfs_start_workers(&fs_info->readahead_workers);
if (ret) {
- ret = -ENOMEM;
+ err = -ENOMEM;
goto fail_sb_buffer;
}
@@ -2356,6 +2456,17 @@ retry_root_backup:
goto recovery_tree_root;
csum_root->track_dirty = 1;
+ ret = find_and_setup_root(tree_root, fs_info,
+ BTRFS_QUOTA_TREE_OBJECTID, quota_root);
+ if (ret) {
+ kfree(quota_root);
+ quota_root = fs_info->quota_root = NULL;
+ } else {
+ quota_root->track_dirty = 1;
+ fs_info->quota_enabled = 1;
+ fs_info->pending_quota_state = 1;
+ }
+
fs_info->generation = generation;
fs_info->last_trans_committed = generation;
@@ -2415,6 +2526,9 @@ retry_root_backup:
" integrity check module %s\n", sb->s_id);
}
#endif
+ ret = btrfs_read_qgroup_config(fs_info);
+ if (ret)
+ goto fail_trans_kthread;
/* do not make disk changes in broken FS */
if (btrfs_super_log_root(disk_super) != 0 &&
@@ -2425,7 +2539,7 @@ retry_root_backup:
printk(KERN_WARNING "Btrfs log replay required "
"on RO media\n");
err = -EIO;
- goto fail_trans_kthread;
+ goto fail_qgroup;
}
blocksize =
btrfs_level_size(tree_root,
@@ -2434,7 +2548,7 @@ retry_root_backup:
log_tree_root = btrfs_alloc_root(fs_info);
if (!log_tree_root) {
err = -ENOMEM;
- goto fail_trans_kthread;
+ goto fail_qgroup;
}
__setup_root(nodesize, leafsize, sectorsize, stripesize,
@@ -2466,15 +2580,15 @@ retry_root_backup:
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_cleanup_fs_roots(fs_info);
- if (ret) {
- }
+ if (ret)
+ goto fail_trans_kthread;
ret = btrfs_recover_relocation(tree_root);
if (ret < 0) {
printk(KERN_WARNING
"btrfs: failed to recover relocation\n");
err = -EINVAL;
- goto fail_trans_kthread;
+ goto fail_qgroup;
}
}
@@ -2484,10 +2598,10 @@ retry_root_backup:
fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
if (!fs_info->fs_root)
- goto fail_trans_kthread;
+ goto fail_qgroup;
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
- goto fail_trans_kthread;
+ goto fail_qgroup;
}
if (sb->s_flags & MS_RDONLY)
@@ -2511,6 +2625,8 @@ retry_root_backup:
return 0;
+fail_qgroup:
+ btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
fail_cleaner:
@@ -3109,6 +3225,8 @@ int close_ctree(struct btrfs_root *root)
fs_info->closing = 2;
smp_mb();
+ btrfs_free_qgroup_config(root->fs_info);
+
if (fs_info->delalloc_bytes) {
printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
(unsigned long long)fs_info->delalloc_bytes);
@@ -3128,6 +3246,10 @@ int close_ctree(struct btrfs_root *root)
free_extent_buffer(fs_info->dev_root->commit_root);
free_extent_buffer(fs_info->csum_root->node);
free_extent_buffer(fs_info->csum_root->commit_root);
+ if (fs_info->quota_root) {
+ free_extent_buffer(fs_info->quota_root->node);
+ free_extent_buffer(fs_info->quota_root->commit_root);
+ }
btrfs_free_block_groups(fs_info);
@@ -3258,7 +3380,7 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
-static int btree_lock_page_hook(struct page *page, void *data,
+int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *))
{
struct inode *inode = page->mapping->host;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 05b3fab39f7..95e147eea23 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -89,6 +89,12 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
int btrfs_cleanup_transaction(struct btrfs_root *root);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
struct btrfs_root *root);
+void btrfs_abort_devices(struct btrfs_root *root);
+struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ u64 objectid);
+int btree_lock_page_hook(struct page *page, void *data,
+ void (*flush_fn)(void *));
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6e1d36702ff..4e1b153b7c4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -34,6 +34,8 @@
#include "locking.h"
#include "free-space-cache.h"
+#undef SCRAMBLE_DELAYED_REFS
+
/*
* control flags for do_chunk_alloc's force field
* CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
@@ -2217,6 +2219,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_head *locked_ref = NULL;
struct btrfs_delayed_extent_op *extent_op;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int count = 0;
int must_insert_reserved = 0;
@@ -2255,7 +2258,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ref = select_delayed_ref(locked_ref);
if (ref && ref->seq &&
- btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
+ btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
/*
* there are still refs with lower seq numbers in the
* process of being added. Don't run this ref yet.
@@ -2337,7 +2340,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
}
next:
- do_chunk_alloc(trans, root->fs_info->extent_root,
+ do_chunk_alloc(trans, fs_info->extent_root,
2 * 1024 * 1024,
btrfs_get_alloc_profile(root, 0),
CHUNK_ALLOC_NO_FORCE);
@@ -2347,21 +2350,99 @@ next:
return count;
}
-static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
+static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
unsigned long num_refs,
struct list_head *first_seq)
{
spin_unlock(&delayed_refs->lock);
pr_debug("waiting for more refs (num %ld, first %p)\n",
num_refs, first_seq);
- wait_event(delayed_refs->seq_wait,
+ wait_event(fs_info->tree_mod_seq_wait,
num_refs != delayed_refs->num_entries ||
- delayed_refs->seq_head.next != first_seq);
+ fs_info->tree_mod_seq_list.next != first_seq);
pr_debug("done waiting for more refs (num %ld, first %p)\n",
- delayed_refs->num_entries, delayed_refs->seq_head.next);
+ delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
spin_lock(&delayed_refs->lock);
}
+#ifdef SCRAMBLE_DELAYED_REFS
+/*
+ * Normally delayed refs get processed in ascending bytenr order. This
+ * correlates in most cases to the order added. To expose dependencies on this
+ * order, we start to process the tree in the middle instead of the beginning
+ */
+static u64 find_middle(struct rb_root *root)
+{
+ struct rb_node *n = root->rb_node;
+ struct btrfs_delayed_ref_node *entry;
+ int alt = 1;
+ u64 middle;
+ u64 first = 0, last = 0;
+
+ n = rb_first(root);
+ if (n) {
+ entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
+ first = entry->bytenr;
+ }
+ n = rb_last(root);
+ if (n) {
+ entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
+ last = entry->bytenr;
+ }
+ n = root->rb_node;
+
+ while (n) {
+ entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
+ WARN_ON(!entry->in_tree);
+
+ middle = entry->bytenr;
+
+ if (alt)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+
+ alt = 1 - alt;
+ }
+ return middle;
+}
+#endif
+
+int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct qgroup_update *qgroup_update;
+ int ret = 0;
+
+ if (list_empty(&trans->qgroup_ref_list) !=
+ !trans->delayed_ref_elem.seq) {
+ /* list without seq or seq without list */
+ printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
+ list_empty(&trans->qgroup_ref_list) ? "" : " not",
+ trans->delayed_ref_elem.seq);
+ BUG();
+ }
+
+ if (!trans->delayed_ref_elem.seq)
+ return 0;
+
+ while (!list_empty(&trans->qgroup_ref_list)) {
+ qgroup_update = list_first_entry(&trans->qgroup_ref_list,
+ struct qgroup_update, list);
+ list_del(&qgroup_update->list);
+ if (!ret)
+ ret = btrfs_qgroup_account_ref(
+ trans, fs_info, qgroup_update->node,
+ qgroup_update->extent_op);
+ kfree(qgroup_update);
+ }
+
+ btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
+
+ return ret;
+}
+
/*
* this starts processing the delayed reference count updates and
* extent insertions we have queued up so far. count can be
@@ -2398,11 +2479,18 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
CHUNK_ALLOC_NO_FORCE);
+ btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+
delayed_refs = &trans->transaction->delayed_refs;
INIT_LIST_HEAD(&cluster);
again:
consider_waiting = 0;
spin_lock(&delayed_refs->lock);
+
+#ifdef SCRAMBLE_DELAYED_REFS
+ delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
+#endif
+
if (count == 0) {
count = delayed_refs->num_entries * 2;
run_most = 1;
@@ -2437,7 +2525,7 @@ again:
num_refs = delayed_refs->num_entries;
first_seq = root->fs_info->tree_mod_seq_list.next;
} else {
- wait_for_more_refs(delayed_refs,
+ wait_for_more_refs(root->fs_info, delayed_refs,
num_refs, first_seq);
/*
* after waiting, things have changed. we
@@ -2502,6 +2590,7 @@ again:
}
out:
spin_unlock(&delayed_refs->lock);
+ assert_qgroups_uptodate(trans);
return 0;
}
@@ -2581,8 +2670,10 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
node = rb_prev(node);
if (node) {
+ int seq = ref->seq;
+
ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
- if (ref->bytenr == bytenr)
+ if (ref->bytenr == bytenr && ref->seq == seq)
goto out_unlock;
}
@@ -2903,8 +2994,13 @@ again:
}
spin_lock(&block_group->lock);
- if (block_group->cached != BTRFS_CACHE_FINISHED) {
- /* We're not cached, don't bother trying to write stuff out */
+ if (block_group->cached != BTRFS_CACHE_FINISHED ||
+ !btrfs_test_opt(root, SPACE_CACHE)) {
+ /*
+ * don't bother trying to write stuff out _if_
+ * a) we're not cached,
+ * b) we're with nospace_cache mount option.
+ */
dcs = BTRFS_DC_WRITTEN;
spin_unlock(&block_group->lock);
goto out_put;
@@ -3134,6 +3230,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
init_waitqueue_head(&found->wait);
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ info->data_sinfo = found;
return 0;
}
@@ -3263,12 +3361,6 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
return get_alloc_profile(root, flags);
}
-void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
-{
- BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
- BTRFS_BLOCK_GROUP_DATA);
-}
-
/*
* This will check the space that the inode allocates from to make sure we have
* enough space for bytes.
@@ -3277,6 +3369,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
struct btrfs_space_info *data_sinfo;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 used;
int ret = 0, committed = 0, alloc_chunk = 1;
@@ -3289,7 +3382,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
committed = 1;
}
- data_sinfo = BTRFS_I(inode)->space_info;
+ data_sinfo = fs_info->data_sinfo;
if (!data_sinfo)
goto alloc;
@@ -3330,10 +3423,9 @@ alloc:
goto commit_trans;
}
- if (!data_sinfo) {
- btrfs_set_inode_space_info(root, inode);
- data_sinfo = BTRFS_I(inode)->space_info;
- }
+ if (!data_sinfo)
+ data_sinfo = fs_info->data_sinfo;
+
goto again;
}
@@ -3380,7 +3472,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
/* make sure bytes are sectorsize aligned */
bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
- data_sinfo = BTRFS_I(inode)->space_info;
+ data_sinfo = root->fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
data_sinfo->bytes_may_use -= bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
@@ -3586,89 +3678,58 @@ out:
/*
* shrink metadata reservation for delalloc
*/
-static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
- bool wait_ordered)
+static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
+ bool wait_ordered)
{
struct btrfs_block_rsv *block_rsv;
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
- u64 reserved;
+ u64 delalloc_bytes;
u64 max_reclaim;
- u64 reclaimed = 0;
long time_left;
unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
int loops = 0;
- unsigned long progress;
trans = (struct btrfs_trans_handle *)current->journal_info;
block_rsv = &root->fs_info->delalloc_block_rsv;
space_info = block_rsv->space_info;
smp_mb();
- reserved = space_info->bytes_may_use;
- progress = space_info->reservation_progress;
-
- if (reserved == 0)
- return 0;
-
- smp_mb();
- if (root->fs_info->delalloc_bytes == 0) {
+ delalloc_bytes = root->fs_info->delalloc_bytes;
+ if (delalloc_bytes == 0) {
if (trans)
- return 0;
+ return;
btrfs_wait_ordered_extents(root, 0, 0);
- return 0;
+ return;
}
- max_reclaim = min(reserved, to_reclaim);
- nr_pages = max_t(unsigned long, nr_pages,
- max_reclaim >> PAGE_CACHE_SHIFT);
- while (loops < 1024) {
- /* have the flusher threads jump in and do some IO */
- smp_mb();
- nr_pages = min_t(unsigned long, nr_pages,
- root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
+ while (delalloc_bytes && loops < 3) {
+ max_reclaim = min(delalloc_bytes, to_reclaim);
+ nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
- WB_REASON_FS_FREE_SPACE);
+ WB_REASON_FS_FREE_SPACE);
spin_lock(&space_info->lock);
- if (reserved > space_info->bytes_may_use)
- reclaimed += reserved - space_info->bytes_may_use;
- reserved = space_info->bytes_may_use;
+ if (space_info->bytes_used + space_info->bytes_reserved +
+ space_info->bytes_pinned + space_info->bytes_readonly +
+ space_info->bytes_may_use + orig <=
+ space_info->total_bytes) {
+ spin_unlock(&space_info->lock);
+ break;
+ }
spin_unlock(&space_info->lock);
loops++;
-
- if (reserved == 0 || reclaimed >= max_reclaim)
- break;
-
- if (trans && trans->transaction->blocked)
- return -EAGAIN;
-
if (wait_ordered && !trans) {
btrfs_wait_ordered_extents(root, 0, 0);
} else {
- time_left = schedule_timeout_interruptible(1);
-
- /* We were interrupted, exit */
+ time_left = schedule_timeout_killable(1);
if (time_left)
break;
}
-
- /* we've kicked the IO a few times, if anything has been freed,
- * exit. There is no sense in looping here for a long time
- * when we really need to commit the transaction, or there are
- * just too many writers without enough free space
- */
-
- if (loops > 3) {
- smp_mb();
- if (progress != space_info->reservation_progress)
- break;
- }
-
+ smp_mb();
+ delalloc_bytes = root->fs_info->delalloc_bytes;
}
-
- return reclaimed >= to_reclaim;
}
/**
@@ -3728,6 +3789,58 @@ commit:
return btrfs_commit_transaction(trans, root);
}
+enum flush_state {
+ FLUSH_DELALLOC = 1,
+ FLUSH_DELALLOC_WAIT = 2,
+ FLUSH_DELAYED_ITEMS_NR = 3,
+ FLUSH_DELAYED_ITEMS = 4,
+ COMMIT_TRANS = 5,
+};
+
+static int flush_space(struct btrfs_root *root,
+ struct btrfs_space_info *space_info, u64 num_bytes,
+ u64 orig_bytes, int state)
+{
+ struct btrfs_trans_handle *trans;
+ int nr;
+ int ret = 0;
+
+ switch (state) {
+ case FLUSH_DELALLOC:
+ case FLUSH_DELALLOC_WAIT:
+ shrink_delalloc(root, num_bytes, orig_bytes,
+ state == FLUSH_DELALLOC_WAIT);
+ break;
+ case FLUSH_DELAYED_ITEMS_NR:
+ case FLUSH_DELAYED_ITEMS:
+ if (state == FLUSH_DELAYED_ITEMS_NR) {
+ u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
+
+ nr = (int)div64_u64(num_bytes, bytes);
+ if (!nr)
+ nr = 1;
+ nr *= 2;
+ } else {
+ nr = -1;
+ }
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+ ret = btrfs_run_delayed_items_nr(trans, root, nr);
+ btrfs_end_transaction(trans, root);
+ break;
+ case COMMIT_TRANS:
+ ret = may_commit_transaction(root, space_info, orig_bytes, 0);
+ break;
+ default:
+ ret = -ENOSPC;
+ break;
+ }
+
+ return ret;
+}
/**
* reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
* @root - the root we're allocating for
@@ -3749,11 +3862,10 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
struct btrfs_space_info *space_info = block_rsv->space_info;
u64 used;
u64 num_bytes = orig_bytes;
- int retries = 0;
+ int flush_state = FLUSH_DELALLOC;
int ret = 0;
- bool committed = false;
bool flushing = false;
- bool wait_ordered = false;
+ bool committed = false;
again:
ret = 0;
@@ -3812,9 +3924,8 @@ again:
* amount plus the amount of bytes that we need for this
* reservation.
*/
- wait_ordered = true;
num_bytes = used - space_info->total_bytes +
- (orig_bytes * (retries + 1));
+ (orig_bytes * 2);
}
if (ret) {
@@ -3867,8 +3978,6 @@ again:
trace_btrfs_space_reservation(root->fs_info,
"space_info", space_info->flags, orig_bytes, 1);
ret = 0;
- } else {
- wait_ordered = true;
}
}
@@ -3887,36 +3996,13 @@ again:
if (!ret || !flush)
goto out;
- /*
- * We do synchronous shrinking since we don't actually unreserve
- * metadata until after the IO is completed.
- */
- ret = shrink_delalloc(root, num_bytes, wait_ordered);
- if (ret < 0)
- goto out;
-
- ret = 0;
-
- /*
- * So if we were overcommitted it's possible that somebody else flushed
- * out enough space and we simply didn't have enough space to reclaim,
- * so go back around and try again.
- */
- if (retries < 2) {
- wait_ordered = true;
- retries++;
+ ret = flush_space(root, space_info, num_bytes, orig_bytes,
+ flush_state);
+ flush_state++;
+ if (!ret)
goto again;
- }
-
- ret = -ENOSPC;
- if (committed)
- goto out;
-
- ret = may_commit_transaction(root, space_info, orig_bytes, 0);
- if (!ret) {
- committed = true;
+ else if (flush_state <= COMMIT_TRANS)
goto again;
- }
out:
if (flushing) {
@@ -3934,7 +4020,10 @@ static struct btrfs_block_rsv *get_block_rsv(
{
struct btrfs_block_rsv *block_rsv = NULL;
- if (root->ref_cows || root == root->fs_info->csum_root)
+ if (root->ref_cows)
+ block_rsv = trans->block_rsv;
+
+ if (root == root->fs_info->csum_root && trans->adding_csums)
block_rsv = trans->block_rsv;
if (!block_rsv)
@@ -4286,6 +4375,9 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ if (!trans->block_rsv)
+ return;
+
if (!trans->bytes_reserved)
return;
@@ -4444,7 +4536,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
int ret;
/* Need to be holding the i_mutex here if we aren't free space cache */
- if (btrfs_is_free_space_inode(root, inode))
+ if (btrfs_is_free_space_inode(inode))
flush = 0;
if (flush && btrfs_transaction_in_commit(root->fs_info))
@@ -4476,6 +4568,13 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
+ if (root->fs_info->quota_enabled) {
+ ret = btrfs_qgroup_reserve(root, num_bytes +
+ nr_extents * root->leafsize);
+ if (ret)
+ return ret;
+ }
+
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
if (ret) {
u64 to_free = 0;
@@ -4554,6 +4653,11 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
+ if (root->fs_info->quota_enabled) {
+ btrfs_qgroup_free(root, num_bytes +
+ dropped * root->leafsize);
+ }
+
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
to_free);
}
@@ -5190,8 +5294,9 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
rb_erase(&head->node.rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- if (waitqueue_active(&delayed_refs->seq_wait))
- wake_up(&delayed_refs->seq_wait);
+ smp_mb();
+ if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
+ wake_up(&root->fs_info->tree_mod_seq_wait);
/*
* we don't take a ref on the node because we're removing it from the
@@ -5748,7 +5853,11 @@ loop:
ret = do_chunk_alloc(trans, root, num_bytes +
2 * 1024 * 1024, data,
CHUNK_ALLOC_LIMITED);
- if (ret < 0) {
+ /*
+ * Do not bail out on ENOSPC since we
+ * can do more things.
+ */
+ if (ret < 0 && ret != -ENOSPC) {
btrfs_abort_transaction(trans,
root, ret);
goto out;
@@ -5816,13 +5925,13 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
again:
list_for_each_entry(cache, &info->block_groups[index], list) {
spin_lock(&cache->lock);
- printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
- "%llu pinned %llu reserved\n",
+ printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
(unsigned long long)cache->key.objectid,
(unsigned long long)cache->key.offset,
(unsigned long long)btrfs_block_group_used(&cache->item),
(unsigned long long)cache->pinned,
- (unsigned long long)cache->reserved);
+ (unsigned long long)cache->reserved,
+ cache->ro ? "[readonly]" : "");
btrfs_dump_free_space(cache, bytes);
spin_unlock(&cache->lock);
}
@@ -7610,8 +7719,21 @@ int btrfs_read_block_groups(struct btrfs_root *root)
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
- if (need_clear)
+ if (need_clear) {
+ /*
+ * When we mount with old space cache, we need to
+ * set BTRFS_DC_CLEAR and set dirty flag.
+ *
+ * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
+ * truncate the old free space cache inode and
+ * setup a new one.
+ * b) Setting 'dirty flag' makes sure that we flush
+ * the new space cache info onto disk.
+ */
cache->disk_cache_state = BTRFS_DC_CLEAR;
+ if (btrfs_test_opt(root, SPACE_CACHE))
+ cache->dirty = 1;
+ }
read_extent_buffer(leaf, &cache->item,
btrfs_item_ptr_offset(leaf, path->slots[0]),
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index deafe19c34b..45c81bb4ac8 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1919,7 +1919,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
return -EIO;
}
- printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
+ printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
"(dev %s sector %llu)\n", page->mapping->host->i_ino,
start, rcu_str_deref(dev->name), sector);
@@ -3078,8 +3078,15 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
}
}
+ /*
+ * We need to do this to prevent races in people who check if the eb is
+ * under IO since we can end up having no IO bits set for a short period
+ * of time.
+ */
+ spin_lock(&eb->refs_lock);
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ spin_unlock(&eb->refs_lock);
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
spin_lock(&fs_info->delalloc_lock);
if (fs_info->dirty_metadata_bytes >= eb->len)
@@ -3088,6 +3095,8 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
WARN_ON(1);
spin_unlock(&fs_info->delalloc_lock);
ret = 1;
+ } else {
+ spin_unlock(&eb->refs_lock);
}
btrfs_tree_unlock(eb);
@@ -3558,19 +3567,38 @@ int extent_readpages(struct extent_io_tree *tree,
struct bio *bio = NULL;
unsigned page_idx;
unsigned long bio_flags = 0;
+ struct page *pagepool[16];
+ struct page *page;
+ int i = 0;
+ int nr = 0;
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
- struct page *page = list_entry(pages->prev, struct page, lru);
+ page = list_entry(pages->prev, struct page, lru);
prefetchw(&page->flags);
list_del(&page->lru);
- if (!add_to_page_cache_lru(page, mapping,
+ if (add_to_page_cache_lru(page, mapping,
page->index, GFP_NOFS)) {
- __extent_read_full_page(tree, page, get_extent,
- &bio, 0, &bio_flags);
+ page_cache_release(page);
+ continue;
}
- page_cache_release(page);
+
+ pagepool[nr++] = page;
+ if (nr < ARRAY_SIZE(pagepool))
+ continue;
+ for (i = 0; i < nr; i++) {
+ __extent_read_full_page(tree, pagepool[i], get_extent,
+ &bio, 0, &bio_flags);
+ page_cache_release(pagepool[i]);
+ }
+ nr = 0;
+ }
+ for (i = 0; i < nr; i++) {
+ __extent_read_full_page(tree, pagepool[i], get_extent,
+ &bio, 0, &bio_flags);
+ page_cache_release(pagepool[i]);
}
+
BUG_ON(!list_empty(pages));
if (bio)
return submit_one_bio(READ, bio, 0, bio_flags);
@@ -4124,11 +4152,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
* So bump the ref count first, then set the bit. If someone
* beat us to it, drop the ref we added.
*/
- if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+ spin_lock(&eb->refs_lock);
+ if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
atomic_inc(&eb->refs);
- if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
- atomic_dec(&eb->refs);
- }
+ spin_unlock(&eb->refs_lock);
}
static void mark_extent_buffer_accessed(struct extent_buffer *eb)
@@ -4240,9 +4267,7 @@ again:
goto free_eb;
}
/* add one reference for the tree */
- spin_lock(&eb->refs_lock);
check_buffer_tree_ref(eb);
- spin_unlock(&eb->refs_lock);
spin_unlock(&tree->buffer_lock);
radix_tree_preload_end();
@@ -4301,7 +4326,7 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
}
/* Expects to have eb->eb_lock already held */
-static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
+static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
{
WARN_ON(atomic_read(&eb->refs) == 0);
if (atomic_dec_and_test(&eb->refs)) {
@@ -4322,9 +4347,11 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
btrfs_release_extent_buffer_page(eb, 0);
call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
- return;
+ return 1;
}
spin_unlock(&eb->refs_lock);
+
+ return 0;
}
void free_extent_buffer(struct extent_buffer *eb)
@@ -4963,7 +4990,6 @@ int try_release_extent_buffer(struct page *page, gfp_t mask)
spin_unlock(&eb->refs_lock);
return 0;
}
- release_extent_buffer(eb, mask);
- return 1;
+ return release_extent_buffer(eb, mask);
}
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 5d158d32023..b45b9de0c21 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -183,7 +183,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
* read from the commit root and sidestep a nasty deadlock
* between reading the free space cache and updating the csum tree.
*/
- if (btrfs_is_free_space_inode(root, inode)) {
+ if (btrfs_is_free_space_inode(inode)) {
path->search_commit_root = 1;
path->skip_locking = 1;
}
@@ -690,6 +690,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
return -ENOMEM;
sector_sum = sums->sums;
+ trans->adding_csums = 1;
again:
next_offset = (u64)-1;
found_next = 0;
@@ -853,6 +854,7 @@ next_sector:
goto again;
}
out:
+ trans->adding_csums = 0;
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 6c4e2baa929..6b10acfc2f5 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1968,7 +1968,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
- if (info->bytes >= bytes)
+ if (info->bytes >= bytes && !block_group->ro)
count++;
printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
(unsigned long long)info->offset,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fb8d671d00e..48bdfd2591c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -825,7 +825,7 @@ static noinline int cow_file_range(struct inode *inode,
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
- BUG_ON(btrfs_is_free_space_inode(root, inode));
+ BUG_ON(btrfs_is_free_space_inode(inode));
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
extent_clear_unlock_delalloc(inode,
@@ -1010,7 +1010,7 @@ static noinline void async_cow_submit(struct btrfs_work *work)
atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
if (atomic_read(&root->fs_info->async_delalloc_pages) <
- 5 * 1042 * 1024 &&
+ 5 * 1024 * 1024 &&
waitqueue_active(&root->fs_info->async_submit_wait))
wake_up(&root->fs_info->async_submit_wait);
@@ -1035,7 +1035,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long nr_pages;
u64 cur_end;
- int limit = 10 * 1024 * 1042;
+ int limit = 10 * 1024 * 1024;
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1, 0, NULL, GFP_NOFS);
@@ -1153,7 +1153,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
return -ENOMEM;
}
- nolock = btrfs_is_free_space_inode(root, inode);
+ nolock = btrfs_is_free_space_inode(inode);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
@@ -1466,7 +1466,7 @@ static void btrfs_set_bit_hook(struct inode *inode,
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- bool do_list = !btrfs_is_free_space_inode(root, inode);
+ bool do_list = !btrfs_is_free_space_inode(inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1501,7 +1501,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- bool do_list = !btrfs_is_free_space_inode(root, inode);
+ bool do_list = !btrfs_is_free_space_inode(inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1612,7 +1612,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- if (btrfs_is_free_space_inode(root, inode))
+ if (btrfs_is_free_space_inode(inode))
metadata = 2;
if (!(rw & REQ_WRITE)) {
@@ -1869,7 +1869,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
int ret;
bool nolock;
- nolock = btrfs_is_free_space_inode(root, inode);
+ nolock = btrfs_is_free_space_inode(inode);
if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
ret = -EIO;
@@ -2007,7 +2007,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
ordered_extent->work.func = finish_ordered_fn;
ordered_extent->work.flags = 0;
- if (btrfs_is_free_space_inode(root, inode))
+ if (btrfs_is_free_space_inode(inode))
workers = &root->fs_info->endio_freespace_worker;
else
workers = &root->fs_info->endio_write_workers;
@@ -2732,8 +2732,10 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
* The data relocation inode should also be directly updated
* without delay
*/
- if (!btrfs_is_free_space_inode(root, inode)
+ if (!btrfs_is_free_space_inode(inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ btrfs_update_root_times(trans, root);
+
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
@@ -2833,7 +2835,7 @@ err:
inode_inc_iversion(inode);
inode_inc_iversion(dir);
inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- btrfs_update_inode(trans, root, dir);
+ ret = btrfs_update_inode(trans, root, dir);
out:
return ret;
}
@@ -3743,7 +3745,7 @@ void btrfs_evict_inode(struct inode *inode)
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
- btrfs_is_free_space_inode(root, inode)))
+ btrfs_is_free_space_inode(inode)))
goto no_delete;
if (is_bad_inode(inode)) {
@@ -4082,7 +4084,6 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
struct btrfs_iget_args *args = p;
inode->i_ino = args->ino;
BTRFS_I(inode)->root = args->root;
- btrfs_set_inode_space_info(args->root, inode);
return 0;
}
@@ -4457,7 +4458,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
return 0;
- if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
+ if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
@@ -4518,6 +4519,11 @@ int btrfs_dirty_inode(struct inode *inode)
static int btrfs_update_time(struct inode *inode, struct timespec *now,
int flags)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ if (btrfs_root_readonly(root))
+ return -EROFS;
+
if (flags & S_VERSION)
inode_inc_iversion(inode);
if (flags & S_CTIME)
@@ -4662,7 +4668,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
BTRFS_I(inode)->root = root;
BTRFS_I(inode)->generation = trans->transid;
inode->i_generation = BTRFS_I(inode)->generation;
- btrfs_set_inode_space_info(root, inode);
if (S_ISDIR(mode))
owner = 0;
@@ -4690,6 +4695,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
+ memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
+ sizeof(*inode_item));
fill_inode_item(trans, path->nodes[0], inode_item, inode);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
@@ -4723,6 +4730,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
trace_btrfs_inode_new(inode);
btrfs_set_inode_last_trans(trans, inode);
+ btrfs_update_root_times(trans, root);
+
return inode;
fail:
if (dir)
@@ -6939,7 +6948,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
return NULL;
ei->root = NULL;
- ei->space_info = NULL;
ei->generation = 0;
ei->last_trans = 0;
ei->last_sub_trans = 0;
@@ -7046,7 +7054,7 @@ int btrfs_drop_inode(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0 &&
- !btrfs_is_free_space_inode(root, inode))
+ !btrfs_is_free_space_inode(inode))
return 1;
else
return generic_drop_inode(inode);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 1e9f6c019ad..43f0012016e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
+#include <linux/uuid.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -53,6 +54,7 @@
#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
+#include "send.h"
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -336,7 +338,8 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
static noinline int create_subvol(struct btrfs_root *root,
struct dentry *dentry,
char *name, int namelen,
- u64 *async_transid)
+ u64 *async_transid,
+ struct btrfs_qgroup_inherit **inherit)
{
struct btrfs_trans_handle *trans;
struct btrfs_key key;
@@ -346,11 +349,13 @@ static noinline int create_subvol(struct btrfs_root *root,
struct btrfs_root *new_root;
struct dentry *parent = dentry->d_parent;
struct inode *dir;
+ struct timespec cur_time = CURRENT_TIME;
int ret;
int err;
u64 objectid;
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
+ uuid_le new_uuid;
ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
if (ret)
@@ -368,6 +373,11 @@ static noinline int create_subvol(struct btrfs_root *root,
if (IS_ERR(trans))
return PTR_ERR(trans);
+ ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid,
+ inherit ? *inherit : NULL);
+ if (ret)
+ goto fail;
+
leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
0, objectid, NULL, 0, 0, 0);
if (IS_ERR(leaf)) {
@@ -389,8 +399,9 @@ static noinline int create_subvol(struct btrfs_root *root,
BTRFS_UUID_SIZE);
btrfs_mark_buffer_dirty(leaf);
+ memset(&root_item, 0, sizeof(root_item));
+
inode_item = &root_item.inode;
- memset(inode_item, 0, sizeof(*inode_item));
inode_item->generation = cpu_to_le64(1);
inode_item->size = cpu_to_le64(3);
inode_item->nlink = cpu_to_le32(1);
@@ -408,8 +419,15 @@ static noinline int create_subvol(struct btrfs_root *root,
btrfs_set_root_used(&root_item, leaf->len);
btrfs_set_root_last_snapshot(&root_item, 0);
- memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
- root_item.drop_level = 0;
+ btrfs_set_root_generation_v2(&root_item,
+ btrfs_root_generation(&root_item));
+ uuid_le_gen(&new_uuid);
+ memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
+ root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
+ root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec);
+ root_item.ctime = root_item.otime;
+ btrfs_set_root_ctransid(&root_item, trans->transid);
+ btrfs_set_root_otransid(&root_item, trans->transid);
btrfs_tree_unlock(leaf);
free_extent_buffer(leaf);
@@ -484,7 +502,7 @@ fail:
static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
char *name, int namelen, u64 *async_transid,
- bool readonly)
+ bool readonly, struct btrfs_qgroup_inherit **inherit)
{
struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
@@ -502,6 +520,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
+ if (inherit) {
+ pending_snapshot->inherit = *inherit;
+ *inherit = NULL; /* take responsibility to free it */
+ }
trans = btrfs_start_transaction(root->fs_info->extent_root, 5);
if (IS_ERR(trans)) {
@@ -635,7 +657,8 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
static noinline int btrfs_mksubvol(struct path *parent,
char *name, int namelen,
struct btrfs_root *snap_src,
- u64 *async_transid, bool readonly)
+ u64 *async_transid, bool readonly,
+ struct btrfs_qgroup_inherit **inherit)
{
struct inode *dir = parent->dentry->d_inode;
struct dentry *dentry;
@@ -652,13 +675,9 @@ static noinline int btrfs_mksubvol(struct path *parent,
if (dentry->d_inode)
goto out_dput;
- error = mnt_want_write(parent->mnt);
- if (error)
- goto out_dput;
-
error = btrfs_may_create(dir, dentry);
if (error)
- goto out_drop_write;
+ goto out_dput;
down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
@@ -666,18 +685,16 @@ static noinline int btrfs_mksubvol(struct path *parent,
goto out_up_read;
if (snap_src) {
- error = create_snapshot(snap_src, dentry,
- name, namelen, async_transid, readonly);
+ error = create_snapshot(snap_src, dentry, name, namelen,
+ async_transid, readonly, inherit);
} else {
error = create_subvol(BTRFS_I(dir)->root, dentry,
- name, namelen, async_transid);
+ name, namelen, async_transid, inherit);
}
if (!error)
fsnotify_mkdir(dir, dentry);
out_up_read:
up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
-out_drop_write:
- mnt_drop_write(parent->mnt);
out_dput:
dput(dentry);
out_unlock:
@@ -832,7 +849,8 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
}
static int should_defrag_range(struct inode *inode, u64 start, int thresh,
- u64 *last_len, u64 *skip, u64 *defrag_end)
+ u64 *last_len, u64 *skip, u64 *defrag_end,
+ int compress)
{
struct extent_map *em;
int ret = 1;
@@ -863,7 +881,7 @@ static int should_defrag_range(struct inode *inode, u64 start, int thresh,
* we hit a real extent, if it is big or the next extent is not a
* real extent, don't bother defragging it
*/
- if ((*last_len == 0 || *last_len >= thresh) &&
+ if (!compress && (*last_len == 0 || *last_len >= thresh) &&
(em->len >= thresh || !next_mergeable))
ret = 0;
out:
@@ -1047,11 +1065,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
u64 newer_than, unsigned long max_to_defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_super_block *disk_super;
struct file_ra_state *ra = NULL;
unsigned long last_index;
u64 isize = i_size_read(inode);
- u64 features;
u64 last_len = 0;
u64 skip = 0;
u64 defrag_end = 0;
@@ -1145,7 +1161,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
extent_thresh, &last_len, &skip,
- &defrag_end)) {
+ &defrag_end, range->flags &
+ BTRFS_DEFRAG_RANGE_COMPRESS)) {
unsigned long next;
/*
* the should_defrag function tells us how much to skip
@@ -1237,11 +1254,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
mutex_unlock(&inode->i_mutex);
}
- disk_super = root->fs_info->super_copy;
- features = btrfs_super_incompat_flags(disk_super);
if (range->compress_type == BTRFS_COMPRESS_LZO) {
- features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
- btrfs_set_super_incompat_flags(disk_super, features);
+ btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
}
ret = defrag_count;
@@ -1379,41 +1393,39 @@ out:
}
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
- char *name,
- unsigned long fd,
- int subvol,
- u64 *transid,
- bool readonly)
+ char *name, unsigned long fd, int subvol,
+ u64 *transid, bool readonly,
+ struct btrfs_qgroup_inherit **inherit)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct file *src_file;
int namelen;
int ret = 0;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ goto out;
namelen = strlen(name);
if (strchr(name, '/')) {
ret = -EINVAL;
- goto out;
+ goto out_drop_write;
}
if (name[0] == '.' &&
(namelen == 1 || (name[1] == '.' && namelen == 2))) {
ret = -EEXIST;
- goto out;
+ goto out_drop_write;
}
if (subvol) {
ret = btrfs_mksubvol(&file->f_path, name, namelen,
- NULL, transid, readonly);
+ NULL, transid, readonly, inherit);
} else {
struct inode *src_inode;
src_file = fget(fd);
if (!src_file) {
ret = -EINVAL;
- goto out;
+ goto out_drop_write;
}
src_inode = src_file->f_path.dentry->d_inode;
@@ -1422,13 +1434,15 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
"another FS\n");
ret = -EINVAL;
fput(src_file);
- goto out;
+ goto out_drop_write;
}
ret = btrfs_mksubvol(&file->f_path, name, namelen,
BTRFS_I(src_inode)->root,
- transid, readonly);
+ transid, readonly, inherit);
fput(src_file);
}
+out_drop_write:
+ mnt_drop_write_file(file);
out:
return ret;
}
@@ -1446,7 +1460,7 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol,
- NULL, false);
+ NULL, false, NULL);
kfree(vol_args);
return ret;
@@ -1460,6 +1474,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
u64 transid = 0;
u64 *ptr = NULL;
bool readonly = false;
+ struct btrfs_qgroup_inherit *inherit = NULL;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
@@ -1467,7 +1482,8 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
if (vol_args->flags &
- ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY)) {
+ ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
+ BTRFS_SUBVOL_QGROUP_INHERIT)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -1476,10 +1492,21 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ptr = &transid;
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
+ if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
+ if (vol_args->size > PAGE_CACHE_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+ inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
+ if (IS_ERR(inherit)) {
+ ret = PTR_ERR(inherit);
+ goto out;
+ }
+ }
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
- vol_args->fd, subvol,
- ptr, readonly);
+ vol_args->fd, subvol, ptr,
+ readonly, &inherit);
if (ret == 0 && ptr &&
copy_to_user(arg +
@@ -1488,6 +1515,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ret = -EFAULT;
out:
kfree(vol_args);
+ kfree(inherit);
return ret;
}
@@ -1523,29 +1551,40 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
u64 flags;
int ret = 0;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ goto out;
- if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
- return -EINVAL;
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
+ ret = -EINVAL;
+ goto out_drop_write;
+ }
- if (copy_from_user(&flags, arg, sizeof(flags)))
- return -EFAULT;
+ if (copy_from_user(&flags, arg, sizeof(flags))) {
+ ret = -EFAULT;
+ goto out_drop_write;
+ }
- if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
- return -EINVAL;
+ if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
+ ret = -EINVAL;
+ goto out_drop_write;
+ }
- if (flags & ~BTRFS_SUBVOL_RDONLY)
- return -EOPNOTSUPP;
+ if (flags & ~BTRFS_SUBVOL_RDONLY) {
+ ret = -EOPNOTSUPP;
+ goto out_drop_write;
+ }
- if (!inode_owner_or_capable(inode))
- return -EACCES;
+ if (!inode_owner_or_capable(inode)) {
+ ret = -EACCES;
+ goto out_drop_write;
+ }
down_write(&root->fs_info->subvol_sem);
/* nothing to do */
if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
- goto out;
+ goto out_drop_sem;
root_flags = btrfs_root_flags(&root->root_item);
if (flags & BTRFS_SUBVOL_RDONLY)
@@ -1568,8 +1607,11 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
out_reset:
if (ret)
btrfs_set_root_flags(&root->root_item, root_flags);
-out:
+out_drop_sem:
up_write(&root->fs_info->subvol_sem);
+out_drop_write:
+ mnt_drop_write_file(file);
+out:
return ret;
}
@@ -2340,6 +2382,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
goto out_drop_write;
}
+ ret = -EXDEV;
+ if (src_file->f_path.mnt != file->f_path.mnt)
+ goto out_fput;
+
src = src_file->f_dentry->d_inode;
ret = -EINVAL;
@@ -2360,7 +2406,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
goto out_fput;
ret = -EXDEV;
- if (src->i_sb != inode->i_sb || BTRFS_I(src)->root != root)
+ if (src->i_sb != inode->i_sb)
goto out_fput;
ret = -ENOMEM;
@@ -2434,13 +2480,14 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
* note the key will change type as we walk through the
* tree.
*/
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
+ 0, 0);
if (ret < 0)
goto out;
nritems = btrfs_header_nritems(path->nodes[0]);
if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(root, path);
+ ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
if (ret < 0)
goto out;
if (ret > 0)
@@ -2749,8 +2796,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
struct btrfs_path *path;
struct btrfs_key location;
struct btrfs_disk_key disk_key;
- struct btrfs_super_block *disk_super;
- u64 features;
u64 objectid = 0;
u64 dir_id;
@@ -2801,12 +2846,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
- disk_super = root->fs_info->super_copy;
- features = btrfs_super_incompat_flags(disk_super);
- if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) {
- features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL;
- btrfs_set_super_incompat_flags(disk_super, features);
- }
+ btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
btrfs_end_transaction(trans, root);
return 0;
@@ -3063,19 +3103,21 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
}
static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
- void __user *arg, int reset_after_read)
+ void __user *arg)
{
struct btrfs_ioctl_get_dev_stats *sa;
int ret;
- if (reset_after_read && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa))
return PTR_ERR(sa);
- ret = btrfs_get_dev_stats(root, sa, reset_after_read);
+ if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
+ kfree(sa);
+ return -EPERM;
+ }
+
+ ret = btrfs_get_dev_stats(root, sa);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
@@ -3265,9 +3307,6 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
-
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -3390,6 +3429,264 @@ out:
return ret;
}
+static long btrfs_ioctl_quota_ctl(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_quota_ctl_args *sa;
+ struct btrfs_trans_handle *trans = NULL;
+ int ret;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ if (sa->cmd != BTRFS_QUOTA_CTL_RESCAN) {
+ trans = btrfs_start_transaction(root, 2);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+ }
+
+ switch (sa->cmd) {
+ case BTRFS_QUOTA_CTL_ENABLE:
+ ret = btrfs_quota_enable(trans, root->fs_info);
+ break;
+ case BTRFS_QUOTA_CTL_DISABLE:
+ ret = btrfs_quota_disable(trans, root->fs_info);
+ break;
+ case BTRFS_QUOTA_CTL_RESCAN:
+ ret = btrfs_quota_rescan(root->fs_info);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (copy_to_user(arg, sa, sizeof(*sa)))
+ ret = -EFAULT;
+
+ if (trans) {
+ err = btrfs_commit_transaction(trans, root);
+ if (err && !ret)
+ ret = err;
+ }
+
+out:
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_qgroup_assign(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_qgroup_assign_args *sa;
+ struct btrfs_trans_handle *trans;
+ int ret;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ /* FIXME: check if the IDs really exist */
+ if (sa->assign) {
+ ret = btrfs_add_qgroup_relation(trans, root->fs_info,
+ sa->src, sa->dst);
+ } else {
+ ret = btrfs_del_qgroup_relation(trans, root->fs_info,
+ sa->src, sa->dst);
+ }
+
+ err = btrfs_end_transaction(trans, root);
+ if (err && !ret)
+ ret = err;
+
+out:
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_qgroup_create(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_qgroup_create_args *sa;
+ struct btrfs_trans_handle *trans;
+ int ret;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ /* FIXME: check if the IDs really exist */
+ if (sa->create) {
+ ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid,
+ NULL);
+ } else {
+ ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
+ }
+
+ err = btrfs_end_transaction(trans, root);
+ if (err && !ret)
+ ret = err;
+
+out:
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_qgroup_limit(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_qgroup_limit_args *sa;
+ struct btrfs_trans_handle *trans;
+ int ret;
+ int err;
+ u64 qgroupid;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ qgroupid = sa->qgroupid;
+ if (!qgroupid) {
+ /* take the current subvol as qgroup */
+ qgroupid = root->root_key.objectid;
+ }
+
+ /* FIXME: check if the IDs really exist */
+ ret = btrfs_limit_qgroup(trans, root->fs_info, qgroupid, &sa->lim);
+
+ err = btrfs_end_transaction(trans, root);
+ if (err && !ret)
+ ret = err;
+
+out:
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_set_received_subvol(struct file *file,
+ void __user *arg)
+{
+ struct btrfs_ioctl_received_subvol_args *sa = NULL;
+ struct inode *inode = fdentry(file)->d_inode;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_root_item *root_item = &root->root_item;
+ struct btrfs_trans_handle *trans;
+ struct timespec ct = CURRENT_TIME;
+ int ret = 0;
+
+ ret = mnt_want_write_file(file);
+ if (ret < 0)
+ return ret;
+
+ down_write(&root->fs_info->subvol_sem);
+
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (btrfs_root_readonly(root)) {
+ ret = -EROFS;
+ goto out;
+ }
+
+ if (!inode_owner_or_capable(inode)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa)) {
+ ret = PTR_ERR(sa);
+ sa = NULL;
+ goto out;
+ }
+
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
+ sa->rtransid = trans->transid;
+ sa->rtime.sec = ct.tv_sec;
+ sa->rtime.nsec = ct.tv_nsec;
+
+ memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
+ btrfs_set_root_stransid(root_item, sa->stransid);
+ btrfs_set_root_rtransid(root_item, sa->rtransid);
+ root_item->stime.sec = cpu_to_le64(sa->stime.sec);
+ root_item->stime.nsec = cpu_to_le32(sa->stime.nsec);
+ root_item->rtime.sec = cpu_to_le64(sa->rtime.sec);
+ root_item->rtime.nsec = cpu_to_le32(sa->rtime.nsec);
+
+ ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ &root->root_key, &root->root_item);
+ if (ret < 0) {
+ btrfs_end_transaction(trans, root);
+ trans = NULL;
+ goto out;
+ } else {
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = copy_to_user(arg, sa, sizeof(*sa));
+ if (ret)
+ ret = -EFAULT;
+
+out:
+ kfree(sa);
+ up_write(&root->fs_info->subvol_sem);
+ mnt_drop_write_file(file);
+ return ret;
+}
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
@@ -3411,6 +3708,8 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_snap_create_v2(file, argp, 0);
case BTRFS_IOC_SUBVOL_CREATE:
return btrfs_ioctl_snap_create(file, argp, 1);
+ case BTRFS_IOC_SUBVOL_CREATE_V2:
+ return btrfs_ioctl_snap_create_v2(file, argp, 1);
case BTRFS_IOC_SNAP_DESTROY:
return btrfs_ioctl_snap_destroy(file, argp);
case BTRFS_IOC_SUBVOL_GETFLAGS:
@@ -3472,10 +3771,20 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_balance_ctl(root, arg);
case BTRFS_IOC_BALANCE_PROGRESS:
return btrfs_ioctl_balance_progress(root, argp);
+ case BTRFS_IOC_SET_RECEIVED_SUBVOL:
+ return btrfs_ioctl_set_received_subvol(file, argp);
+ case BTRFS_IOC_SEND:
+ return btrfs_ioctl_send(file, argp);
case BTRFS_IOC_GET_DEV_STATS:
- return btrfs_ioctl_get_dev_stats(root, argp, 0);
- case BTRFS_IOC_GET_AND_RESET_DEV_STATS:
- return btrfs_ioctl_get_dev_stats(root, argp, 1);
+ return btrfs_ioctl_get_dev_stats(root, argp);
+ case BTRFS_IOC_QUOTA_CTL:
+ return btrfs_ioctl_quota_ctl(root, argp);
+ case BTRFS_IOC_QGROUP_ASSIGN:
+ return btrfs_ioctl_qgroup_assign(root, argp);
+ case BTRFS_IOC_QGROUP_CREATE:
+ return btrfs_ioctl_qgroup_create(root, argp);
+ case BTRFS_IOC_QGROUP_LIMIT:
+ return btrfs_ioctl_qgroup_limit(root, argp);
}
return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index e440aa653c3..731e2875ab9 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -32,15 +32,46 @@ struct btrfs_ioctl_vol_args {
#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
+#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
#define BTRFS_FSID_SIZE 16
#define BTRFS_UUID_SIZE 16
+#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
+
+struct btrfs_qgroup_limit {
+ __u64 flags;
+ __u64 max_rfer;
+ __u64 max_excl;
+ __u64 rsv_rfer;
+ __u64 rsv_excl;
+};
+
+struct btrfs_qgroup_inherit {
+ __u64 flags;
+ __u64 num_qgroups;
+ __u64 num_ref_copies;
+ __u64 num_excl_copies;
+ struct btrfs_qgroup_limit lim;
+ __u64 qgroups[0];
+};
+
+struct btrfs_ioctl_qgroup_limit_args {
+ __u64 qgroupid;
+ struct btrfs_qgroup_limit lim;
+};
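A hedged userspace sketch, not part of this patch, of how the new limit ioctl might be driven. fs_fd, the helper name and the 257 example id are assumptions; BTRFS_QGROUP_LIMIT_MAX_RFER is the limit flag introduced in ctree.h elsewhere in this series.

	/*
	 * Illustrative userspace sketch, not part of this patch.  Assumes
	 * <sys/ioctl.h>, <string.h>, <stdio.h> and this header are included.
	 * Cap a qgroup's referenced space at max_rfer bytes; fs_fd is any
	 * open fd inside the filesystem.
	 */
	static int qgroup_set_rfer_limit(int fs_fd, __u64 qgroupid, __u64 max_rfer)
	{
		struct btrfs_ioctl_qgroup_limit_args args;

		memset(&args, 0, sizeof(args));
		args.qgroupid = qgroupid;		/* e.g. 257 for subvolume 257 */
		args.lim.flags = BTRFS_QGROUP_LIMIT_MAX_RFER;
		args.lim.max_rfer = max_rfer;

		if (ioctl(fs_fd, BTRFS_IOC_QGROUP_LIMIT, &args) < 0) {
			perror("BTRFS_IOC_QGROUP_LIMIT");
			return -1;
		}
		return 0;
	}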
+
#define BTRFS_SUBVOL_NAME_MAX 4039
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
__u64 transid;
__u64 flags;
- __u64 unused[4];
+ union {
+ struct {
+ __u64 size;
+ struct btrfs_qgroup_inherit __user *qgroup_inherit;
+ };
+ __u64 unused[4];
+ };
char name[BTRFS_SUBVOL_NAME_MAX + 1];
};
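The union overlays the previously unused words: with BTRFS_SUBVOL_QGROUP_INHERIT set, size and qgroup_inherit describe a caller-supplied btrfs_qgroup_inherit blob. A minimal sketch, not part of this patch, of a snapshot that joins one existing qgroup in the same step; the fds and helper name are assumptions.

	/*
	 * Illustrative userspace sketch, not part of this patch.  Assumes
	 * <sys/ioctl.h>, <stdlib.h>, <string.h>, <stdio.h> and this header
	 * are included.  Snapshot the subvolume open at src_fd into the
	 * directory open at dest_dir_fd and add it to one qgroup.
	 */
	static int snap_create_with_qgroup(int dest_dir_fd, int src_fd,
					   const char *name, __u64 qgroupid)
	{
		size_t sz = sizeof(struct btrfs_qgroup_inherit) + sizeof(__u64);
		struct btrfs_qgroup_inherit *inherit = calloc(1, sz);
		struct btrfs_ioctl_vol_args_v2 args;
		int ret = 0;

		if (!inherit)
			return -1;

		inherit->num_qgroups = 1;	/* one entry in the trailing array */
		inherit->qgroups[0] = qgroupid;

		memset(&args, 0, sizeof(args));
		args.fd = src_fd;
		args.flags = BTRFS_SUBVOL_QGROUP_INHERIT;
		args.size = sz;
		args.qgroup_inherit = inherit;
		strncpy(args.name, name, BTRFS_SUBVOL_NAME_MAX);

		if (ioctl(dest_dir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args) < 0) {
			perror("BTRFS_IOC_SNAP_CREATE_V2");
			ret = -1;
		}
		free(inherit);
		return ret;
	}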
@@ -285,9 +316,13 @@ enum btrfs_dev_stat_values {
BTRFS_DEV_STAT_VALUES_MAX
};
+/* Reset statistics after reading; needs SYS_ADMIN capability */
+#define BTRFS_DEV_STATS_RESET (1ULL << 0)
+
struct btrfs_ioctl_get_dev_stats {
__u64 devid; /* in */
__u64 nr_items; /* in/out */
+ __u64 flags; /* in/out */
/* out values: */
__u64 values[BTRFS_DEV_STAT_VALUES_MAX];
@@ -295,6 +330,48 @@ struct btrfs_ioctl_get_dev_stats {
__u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
};
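With the GET_AND_RESET variant gone, resetting is requested through the new flags word. A sketch of the reworked call, not part of this patch; fs_fd and the helper are assumptions, and nr_items is assumed to be the number of value slots the caller provides.

	/*
	 * Illustrative userspace sketch, not part of this patch.  Assumes
	 * <sys/ioctl.h>, <string.h>, <stdio.h> and this header are included.
	 * Read the I/O error counters of one device; pass reset != 0 to
	 * clear them afterwards (which, per the flag comment above, needs
	 * CAP_SYS_ADMIN).
	 */
	static int read_dev_stats(int fs_fd, __u64 devid, int reset)
	{
		struct btrfs_ioctl_get_dev_stats stats;
		int i;

		memset(&stats, 0, sizeof(stats));
		stats.devid = devid;
		stats.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
		stats.flags = reset ? BTRFS_DEV_STATS_RESET : 0;

		if (ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &stats) < 0) {
			perror("BTRFS_IOC_GET_DEV_STATS");
			return -1;
		}
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			printf("devid %llu stat %d: %llu\n",
			       (unsigned long long)devid, i,
			       (unsigned long long)stats.values[i]);
		return 0;
	}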
+#define BTRFS_QUOTA_CTL_ENABLE 1
+#define BTRFS_QUOTA_CTL_DISABLE 2
+#define BTRFS_QUOTA_CTL_RESCAN 3
+struct btrfs_ioctl_quota_ctl_args {
+ __u64 cmd;
+ __u64 status;
+};
+
+struct btrfs_ioctl_qgroup_assign_args {
+ __u64 assign;
+ __u64 src;
+ __u64 dst;
+};
+
+struct btrfs_ioctl_qgroup_create_args {
+ __u64 create;
+ __u64 qgroupid;
+};
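A sketch, not part of this patch, of the typical enable/create/assign sequence these three structures support. fs_fd, the helper name and the 1/100 example group are assumptions, as is the "level in the upper 16 bits" encoding of qgroup ids.

	/*
	 * Illustrative userspace sketch, not part of this patch.  Assumes
	 * <sys/ioctl.h>, <string.h> and this header are included.  Enable
	 * quotas, create a higher-level qgroup and add a subvolume's
	 * level-0 qgroup to it.
	 */
	static int setup_qgroup(int fs_fd, __u64 subvol_id)
	{
		struct btrfs_ioctl_quota_ctl_args ctl;
		struct btrfs_ioctl_qgroup_create_args create;
		struct btrfs_ioctl_qgroup_assign_args assign;
		__u64 parent = (1ULL << 48) | 100;	/* qgroup 1/100 */

		memset(&ctl, 0, sizeof(ctl));
		ctl.cmd = BTRFS_QUOTA_CTL_ENABLE;
		if (ioctl(fs_fd, BTRFS_IOC_QUOTA_CTL, &ctl) < 0)
			return -1;

		memset(&create, 0, sizeof(create));
		create.create = 1;
		create.qgroupid = parent;
		if (ioctl(fs_fd, BTRFS_IOC_QGROUP_CREATE, &create) < 0)
			return -1;

		memset(&assign, 0, sizeof(assign));
		assign.assign = 1;
		assign.src = subvol_id;		/* qgroup 0/<subvol_id> */
		assign.dst = parent;
		if (ioctl(fs_fd, BTRFS_IOC_QGROUP_ASSIGN, &assign) < 0)
			return -1;

		return 0;
	}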
+struct btrfs_ioctl_timespec {
+ __u64 sec;
+ __u32 nsec;
+};
+
+struct btrfs_ioctl_received_subvol_args {
+ char uuid[BTRFS_UUID_SIZE]; /* in */
+ __u64 stransid; /* in */
+ __u64 rtransid; /* out */
+ struct btrfs_ioctl_timespec stime; /* in */
+ struct btrfs_ioctl_timespec rtime; /* out */
+ __u64 flags; /* in */
+ __u64 reserved[16]; /* in */
+};
+
+struct btrfs_ioctl_send_args {
+ __s64 send_fd; /* in */
+ __u64 clone_sources_count; /* in */
+ __u64 __user *clone_sources; /* in */
+ __u64 parent_root; /* in */
+ __u64 flags; /* in */
+ __u64 reserved[4]; /* in */
+};
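A sketch, not part of this patch, of driving the send ioctl for a full (non-incremental) stream; snap_fd, out_fd and the helper are assumptions, and parent_root left at 0 is assumed to mean "no incremental parent".

	/*
	 * Illustrative userspace sketch, not part of this patch.  Assumes
	 * <sys/ioctl.h>, <string.h>, <stdio.h> and this header are included.
	 * Dump a full send stream of the read-only snapshot open at snap_fd
	 * into out_fd (a file or pipe); the receive side lives in userland.
	 */
	static int send_full_stream(int snap_fd, int out_fd)
	{
		struct btrfs_ioctl_send_args args;

		memset(&args, 0, sizeof(args));
		args.send_fd = out_fd;
		args.clone_sources_count = 0;	/* no extra clone sources */
		args.clone_sources = NULL;
		args.parent_root = 0;

		if (ioctl(snap_fd, BTRFS_IOC_SEND, &args) < 0) {
			perror("BTRFS_IOC_SEND");
			return -1;
		}
		return 0;
	}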
+
#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
struct btrfs_ioctl_vol_args)
#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -339,6 +416,8 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
struct btrfs_ioctl_vol_args_v2)
+#define BTRFS_IOC_SUBVOL_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 24, \
+ struct btrfs_ioctl_vol_args_v2)
#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
@@ -359,9 +438,19 @@ struct btrfs_ioctl_get_dev_stats {
struct btrfs_ioctl_ino_path_args)
#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_SET_RECEIVED_SUBVOL _IOWR(BTRFS_IOCTL_MAGIC, 37, \
+ struct btrfs_ioctl_received_subvol_args)
+#define BTRFS_IOC_SEND _IOW(BTRFS_IOCTL_MAGIC, 38, struct btrfs_ioctl_send_args)
+#define BTRFS_IOC_DEVICES_READY _IOR(BTRFS_IOCTL_MAGIC, 39, \
+ struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_QUOTA_CTL _IOWR(BTRFS_IOCTL_MAGIC, 40, \
+ struct btrfs_ioctl_quota_ctl_args)
+#define BTRFS_IOC_QGROUP_ASSIGN _IOW(BTRFS_IOCTL_MAGIC, 41, \
+ struct btrfs_ioctl_qgroup_assign_args)
+#define BTRFS_IOC_QGROUP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 42, \
+ struct btrfs_ioctl_qgroup_create_args)
+#define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
+ struct btrfs_ioctl_qgroup_limit_args)
#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
struct btrfs_ioctl_get_dev_stats)
-#define BTRFS_IOC_GET_AND_RESET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 53, \
- struct btrfs_ioctl_get_dev_stats)
-
#endif
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 272f911203f..a44eff07480 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -78,13 +78,15 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
write_lock(&eb->lock);
WARN_ON(atomic_read(&eb->spinning_writers));
atomic_inc(&eb->spinning_writers);
- if (atomic_dec_and_test(&eb->blocking_writers))
+ if (atomic_dec_and_test(&eb->blocking_writers) &&
+ waitqueue_active(&eb->write_lock_wq))
wake_up(&eb->write_lock_wq);
} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
BUG_ON(atomic_read(&eb->blocking_readers) == 0);
read_lock(&eb->lock);
atomic_inc(&eb->spinning_readers);
- if (atomic_dec_and_test(&eb->blocking_readers))
+ if (atomic_dec_and_test(&eb->blocking_readers) &&
+ waitqueue_active(&eb->read_lock_wq))
wake_up(&eb->read_lock_wq);
}
return;
@@ -199,7 +201,8 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
}
btrfs_assert_tree_read_locked(eb);
WARN_ON(atomic_read(&eb->blocking_readers) == 0);
- if (atomic_dec_and_test(&eb->blocking_readers))
+ if (atomic_dec_and_test(&eb->blocking_readers) &&
+ waitqueue_active(&eb->read_lock_wq))
wake_up(&eb->read_lock_wq);
atomic_dec(&eb->read_locks);
}
@@ -247,8 +250,9 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
if (blockers) {
WARN_ON(atomic_read(&eb->spinning_writers));
atomic_dec(&eb->blocking_writers);
- smp_wmb();
- wake_up(&eb->write_lock_wq);
+ smp_mb();
+ if (waitqueue_active(&eb->write_lock_wq))
+ wake_up(&eb->write_lock_wq);
} else {
WARN_ON(atomic_read(&eb->spinning_writers) != 1);
atomic_dec(&eb->spinning_writers);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
new file mode 100644
index 00000000000..bc424ae5a81
--- /dev/null
+++ b/fs/btrfs/qgroup.c
@@ -0,0 +1,1571 @@
+/*
+ * Copyright (C) 2011 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "ctree.h"
+#include "transaction.h"
+#include "disk-io.h"
+#include "locking.h"
+#include "ulist.h"
+#include "ioctl.h"
+#include "backref.h"
+
+/* TODO XXX FIXME
+ * - subvol delete -> delete when ref goes to 0? delete limits also?
+ * - reorganize keys
+ * - compressed
+ * - sync
+ * - rescan
+ * - copy also limits on subvol creation
+ * - limit
+ * - caches for ulists
+ * - performance benchmarks
+ * - check all ioctl parameters
+ */
+
+/*
+ * one struct for each qgroup, organized in fs_info->qgroup_tree.
+ */
+struct btrfs_qgroup {
+ u64 qgroupid;
+
+ /*
+ * state
+ */
+ u64 rfer; /* referenced */
+ u64 rfer_cmpr; /* referenced compressed */
+ u64 excl; /* exclusive */
+ u64 excl_cmpr; /* exclusive compressed */
+
+ /*
+ * limits
+ */
+ u64 lim_flags; /* which limits are set */
+ u64 max_rfer;
+ u64 max_excl;
+ u64 rsv_rfer;
+ u64 rsv_excl;
+
+ /*
+ * reservation tracking
+ */
+ u64 reserved;
+
+ /*
+ * lists
+ */
+ struct list_head groups; /* groups this group is member of */
+ struct list_head members; /* groups that are members of this group */
+ struct list_head dirty; /* dirty groups */
+ struct rb_node node; /* tree of qgroups */
+
+ /*
+ * temp variables for accounting operations
+ */
+ u64 tag;
+ u64 refcnt;
+};
+
+/*
+ * glue structure to represent the relations between qgroups.
+ */
+struct btrfs_qgroup_list {
+ struct list_head next_group;
+ struct list_head next_member;
+ struct btrfs_qgroup *group;
+ struct btrfs_qgroup *member;
+};
+
+/* must be called with qgroup_lock held */
+static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
+ u64 qgroupid)
+{
+ struct rb_node *n = fs_info->qgroup_tree.rb_node;
+ struct btrfs_qgroup *qgroup;
+
+ while (n) {
+ qgroup = rb_entry(n, struct btrfs_qgroup, node);
+ if (qgroup->qgroupid < qgroupid)
+ n = n->rb_left;
+ else if (qgroup->qgroupid > qgroupid)
+ n = n->rb_right;
+ else
+ return qgroup;
+ }
+ return NULL;
+}
+
+/* must be called with qgroup_lock held */
+static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+ u64 qgroupid)
+{
+ struct rb_node **p = &fs_info->qgroup_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct btrfs_qgroup *qgroup;
+
+ while (*p) {
+ parent = *p;
+ qgroup = rb_entry(parent, struct btrfs_qgroup, node);
+
+ if (qgroup->qgroupid < qgroupid)
+ p = &(*p)->rb_left;
+ else if (qgroup->qgroupid > qgroupid)
+ p = &(*p)->rb_right;
+ else
+ return qgroup;
+ }
+
+ qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
+ if (!qgroup)
+ return ERR_PTR(-ENOMEM);
+
+ qgroup->qgroupid = qgroupid;
+ INIT_LIST_HEAD(&qgroup->groups);
+ INIT_LIST_HEAD(&qgroup->members);
+ INIT_LIST_HEAD(&qgroup->dirty);
+
+ rb_link_node(&qgroup->node, parent, p);
+ rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
+
+ return qgroup;
+}
+
+/* must be called with qgroup_lock held */
+static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
+{
+ struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
+ struct btrfs_qgroup_list *list;
+
+ if (!qgroup)
+ return -ENOENT;
+
+ rb_erase(&qgroup->node, &fs_info->qgroup_tree);
+ list_del(&qgroup->dirty);
+
+ while (!list_empty(&qgroup->groups)) {
+ list = list_first_entry(&qgroup->groups,
+ struct btrfs_qgroup_list, next_group);
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ }
+
+ while (!list_empty(&qgroup->members)) {
+ list = list_first_entry(&qgroup->members,
+ struct btrfs_qgroup_list, next_member);
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ }
+ kfree(qgroup);
+
+ return 0;
+}
+
+/* must be called with qgroup_lock held */
+static int add_relation_rb(struct btrfs_fs_info *fs_info,
+ u64 memberid, u64 parentid)
+{
+ struct btrfs_qgroup *member;
+ struct btrfs_qgroup *parent;
+ struct btrfs_qgroup_list *list;
+
+ member = find_qgroup_rb(fs_info, memberid);
+ parent = find_qgroup_rb(fs_info, parentid);
+ if (!member || !parent)
+ return -ENOENT;
+
+ list = kzalloc(sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ list->group = parent;
+ list->member = member;
+ list_add_tail(&list->next_group, &member->groups);
+ list_add_tail(&list->next_member, &parent->members);
+
+ return 0;
+}
+
+/* must be called with qgroup_lock held */
+static int del_relation_rb(struct btrfs_fs_info *fs_info,
+ u64 memberid, u64 parentid)
+{
+ struct btrfs_qgroup *member;
+ struct btrfs_qgroup *parent;
+ struct btrfs_qgroup_list *list;
+
+ member = find_qgroup_rb(fs_info, memberid);
+ parent = find_qgroup_rb(fs_info, parentid);
+ if (!member || !parent)
+ return -ENOENT;
+
+ list_for_each_entry(list, &member->groups, next_group) {
+ if (list->group == parent) {
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/*
+ * The full config is read in one go, only called from open_ctree()
+ * It doesn't use any locking, as at this point we're still single-threaded
+ */
+int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_root *quota_root = fs_info->quota_root;
+ struct btrfs_path *path = NULL;
+ struct extent_buffer *l;
+ int slot;
+ int ret = 0;
+ u64 flags = 0;
+
+ if (!fs_info->quota_enabled)
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* default this to quota off, in case no status key is found */
+ fs_info->qgroup_flags = 0;
+
+ /*
+ * pass 1: read status, all qgroup infos and limits
+ */
+ key.objectid = 0;
+ key.type = 0;
+ key.offset = 0;
+ ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
+ if (ret)
+ goto out;
+
+ while (1) {
+ struct btrfs_qgroup *qgroup;
+
+ slot = path->slots[0];
+ l = path->nodes[0];
+ btrfs_item_key_to_cpu(l, &found_key, slot);
+
+ if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
+ struct btrfs_qgroup_status_item *ptr;
+
+ ptr = btrfs_item_ptr(l, slot,
+ struct btrfs_qgroup_status_item);
+
+ if (btrfs_qgroup_status_version(l, ptr) !=
+ BTRFS_QGROUP_STATUS_VERSION) {
+ printk(KERN_ERR
+ "btrfs: old qgroup version, quota disabled\n");
+ goto out;
+ }
+ if (btrfs_qgroup_status_generation(l, ptr) !=
+ fs_info->generation) {
+ flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ printk(KERN_ERR
+ "btrfs: qgroup generation mismatch, "
+ "marked as inconsistent\n");
+ }
+ fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
+ ptr);
+ /* FIXME read scan element */
+ goto next1;
+ }
+
+ if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
+ found_key.type != BTRFS_QGROUP_LIMIT_KEY)
+ goto next1;
+
+ qgroup = find_qgroup_rb(fs_info, found_key.offset);
+ if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
+ (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
+ printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
+ flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ }
+ if (!qgroup) {
+ qgroup = add_qgroup_rb(fs_info, found_key.offset);
+ if (IS_ERR(qgroup)) {
+ ret = PTR_ERR(qgroup);
+ goto out;
+ }
+ }
+ switch (found_key.type) {
+ case BTRFS_QGROUP_INFO_KEY: {
+ struct btrfs_qgroup_info_item *ptr;
+
+ ptr = btrfs_item_ptr(l, slot,
+ struct btrfs_qgroup_info_item);
+ qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
+ qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
+ qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
+ qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
+ /* generation currently unused */
+ break;
+ }
+ case BTRFS_QGROUP_LIMIT_KEY: {
+ struct btrfs_qgroup_limit_item *ptr;
+
+ ptr = btrfs_item_ptr(l, slot,
+ struct btrfs_qgroup_limit_item);
+ qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
+ qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
+ qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
+ qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
+ qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
+ break;
+ }
+ }
+next1:
+ ret = btrfs_next_item(quota_root, path);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ break;
+ }
+ btrfs_release_path(path);
+
+ /*
+ * pass 2: read all qgroup relations
+ */
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_RELATION_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
+ if (ret)
+ goto out;
+ while (1) {
+ slot = path->slots[0];
+ l = path->nodes[0];
+ btrfs_item_key_to_cpu(l, &found_key, slot);
+
+ if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
+ goto next2;
+
+ if (found_key.objectid > found_key.offset) {
+ /* parent <- member, not needed to build config */
+ /* FIXME should we omit the key completely? */
+ goto next2;
+ }
+
+ ret = add_relation_rb(fs_info, found_key.objectid,
+ found_key.offset);
+ if (ret)
+ goto out;
+next2:
+ ret = btrfs_next_item(quota_root, path);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ break;
+ }
+out:
+ fs_info->qgroup_flags |= flags;
+ if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
+ fs_info->quota_enabled = 0;
+ fs_info->pending_quota_state = 0;
+ }
+ btrfs_free_path(path);
+
+ return ret < 0 ? ret : 0;
+}
+
+/*
+ * This is only called from close_ctree() or open_ctree(), both in single-
+ * threaded paths. Clean up the in-memory structures. No locking needed.
+ */
+void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
+{
+ struct rb_node *n;
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup_list *list;
+
+ while ((n = rb_first(&fs_info->qgroup_tree))) {
+ qgroup = rb_entry(n, struct btrfs_qgroup, node);
+ rb_erase(n, &fs_info->qgroup_tree);
+
+ WARN_ON(!list_empty(&qgroup->dirty));
+
+ while (!list_empty(&qgroup->groups)) {
+ list = list_first_entry(&qgroup->groups,
+ struct btrfs_qgroup_list,
+ next_group);
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ }
+
+ while (!list_empty(&qgroup->members)) {
+ list = list_first_entry(&qgroup->members,
+ struct btrfs_qgroup_list,
+ next_member);
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ }
+ kfree(qgroup);
+ }
+}
+
+static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *quota_root,
+ u64 src, u64 dst)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = src;
+ key.type = BTRFS_QGROUP_RELATION_KEY;
+ key.offset = dst;
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
+
+ btrfs_mark_buffer_dirty(path->nodes[0]);
+
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *quota_root,
+ u64 src, u64 dst)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = src;
+ key.type = BTRFS_QGROUP_RELATION_KEY;
+ key.offset = dst;
+
+ ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = btrfs_del_item(trans, quota_root, path);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *quota_root, u64 qgroupid)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_qgroup_info_item *qgroup_info;
+ struct btrfs_qgroup_limit_item *qgroup_limit;
+ struct extent_buffer *leaf;
+ struct btrfs_key key;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_INFO_KEY;
+ key.offset = qgroupid;
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
+ sizeof(*qgroup_info));
+ if (ret)
+ goto out;
+
+ leaf = path->nodes[0];
+ qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_qgroup_info_item);
+ btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
+ btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
+
+ btrfs_mark_buffer_dirty(leaf);
+
+ btrfs_release_path(path);
+
+ key.type = BTRFS_QGROUP_LIMIT_KEY;
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
+ sizeof(*qgroup_limit));
+ if (ret)
+ goto out;
+
+ leaf = path->nodes[0];
+ qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_qgroup_limit_item);
+ btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
+
+ btrfs_mark_buffer_dirty(leaf);
+
+ ret = 0;
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int del_qgroup_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *quota_root, u64 qgroupid)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_INFO_KEY;
+ key.offset = qgroupid;
+ ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = btrfs_del_item(trans, quota_root, path);
+ if (ret)
+ goto out;
+
+ btrfs_release_path(path);
+
+ key.type = BTRFS_QGROUP_LIMIT_KEY;
+ ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = btrfs_del_item(trans, quota_root, path);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 qgroupid,
+ u64 flags, u64 max_rfer, u64 max_excl,
+ u64 rsv_rfer, u64 rsv_excl)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *l;
+ struct btrfs_qgroup_limit_item *qgroup_limit;
+ int ret;
+ int slot;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_LIMIT_KEY;
+ key.offset = qgroupid;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ if (ret > 0)
+ ret = -ENOENT;
+
+ if (ret)
+ goto out;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ qgroup_limit = btrfs_item_ptr(l, path->slots[0],
+ struct btrfs_qgroup_limit_item);
+ btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
+ btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
+ btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
+ btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
+ btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);
+
+ btrfs_mark_buffer_dirty(l);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_qgroup *qgroup)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *l;
+ struct btrfs_qgroup_info_item *qgroup_info;
+ int ret;
+ int slot;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_INFO_KEY;
+ key.offset = qgroup->qgroupid;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ if (ret > 0)
+ ret = -ENOENT;
+
+ if (ret)
+ goto out;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ qgroup_info = btrfs_item_ptr(l, path->slots[0],
+ struct btrfs_qgroup_info_item);
+ btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
+ btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
+ btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
+ btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
+ btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
+
+ btrfs_mark_buffer_dirty(l);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_root *root)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *l;
+ struct btrfs_qgroup_status_item *ptr;
+ int ret;
+ int slot;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_STATUS_KEY;
+ key.offset = 0;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ if (ret > 0)
+ ret = -ENOENT;
+
+ if (ret)
+ goto out;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
+ btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
+ btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
+ /* XXX scan */
+
+ btrfs_mark_buffer_dirty(l);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * called with qgroup_lock held
+ */
+static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ int ret;
+
+ if (!root)
+ return -EINVAL;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ while (1) {
+ key.objectid = 0;
+ key.offset = 0;
+ key.type = 0;
+
+ path->leave_spinning = 1;
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret > 0) {
+ if (path->slots[0] == 0)
+ break;
+ path->slots[0]--;
+ } else if (ret < 0) {
+ break;
+ }
+
+ ret = btrfs_del_item(trans, root, path);
+ if (ret)
+ goto out;
+ btrfs_release_path(path);
+ }
+ ret = 0;
+out:
+ root->fs_info->pending_quota_state = 0;
+ btrfs_free_path(path);
+ return ret;
+}
+
+int btrfs_quota_enable(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *quota_root;
+ struct btrfs_path *path = NULL;
+ struct btrfs_qgroup_status_item *ptr;
+ struct extent_buffer *leaf;
+ struct btrfs_key key;
+ int ret = 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ if (fs_info->quota_root) {
+ fs_info->pending_quota_state = 1;
+ spin_unlock(&fs_info->qgroup_lock);
+ goto out;
+ }
+ spin_unlock(&fs_info->qgroup_lock);
+
+ /*
+ * initially create the quota tree
+ */
+ quota_root = btrfs_create_tree(trans, fs_info,
+ BTRFS_QUOTA_TREE_OBJECTID);
+ if (IS_ERR(quota_root)) {
+ ret = PTR_ERR(quota_root);
+ goto out;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_STATUS_KEY;
+ key.offset = 0;
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
+ sizeof(*ptr));
+ if (ret)
+ goto out;
+
+ leaf = path->nodes[0];
+ ptr = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_qgroup_status_item);
+ btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
+ btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
+ fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
+ btrfs_set_qgroup_status_scan(leaf, ptr, 0);
+
+ btrfs_mark_buffer_dirty(leaf);
+
+ spin_lock(&fs_info->qgroup_lock);
+ fs_info->quota_root = quota_root;
+ fs_info->pending_quota_state = 1;
+ spin_unlock(&fs_info->qgroup_lock);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+int btrfs_quota_disable(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *quota_root;
+ int ret = 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ fs_info->quota_enabled = 0;
+ fs_info->pending_quota_state = 0;
+ quota_root = fs_info->quota_root;
+ fs_info->quota_root = NULL;
+ btrfs_free_qgroup_config(fs_info);
+ spin_unlock(&fs_info->qgroup_lock);
+
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = btrfs_clean_quota_tree(trans, quota_root);
+ if (ret)
+ goto out;
+
+ ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
+ if (ret)
+ goto out;
+
+ list_del(&quota_root->dirty_list);
+
+ btrfs_tree_lock(quota_root->node);
+ clean_tree_block(trans, tree_root, quota_root->node);
+ btrfs_tree_unlock(quota_root->node);
+ btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
+
+ free_extent_buffer(quota_root->node);
+ free_extent_buffer(quota_root->commit_root);
+ kfree(quota_root);
+out:
+ return ret;
+}
+
+int btrfs_quota_rescan(struct btrfs_fs_info *fs_info)
+{
+ /* FIXME */
+ return 0;
+}
+
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+{
+ struct btrfs_root *quota_root;
+ int ret = 0;
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = add_qgroup_relation_item(trans, quota_root, src, dst);
+ if (ret)
+ return ret;
+
+ ret = add_qgroup_relation_item(trans, quota_root, dst, src);
+ if (ret) {
+ del_qgroup_relation_item(trans, quota_root, src, dst);
+ return ret;
+ }
+
+ spin_lock(&fs_info->qgroup_lock);
+ ret = add_relation_rb(quota_root->fs_info, src, dst);
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+{
+ struct btrfs_root *quota_root;
+ int ret = 0;
+ int err;
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = del_qgroup_relation_item(trans, quota_root, src, dst);
+ err = del_qgroup_relation_item(trans, quota_root, dst, src);
+ if (err && !ret)
+ ret = err;
+
+ spin_lock(&fs_info->qgroup_lock);
+ del_relation_rb(fs_info, src, dst);
+
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
+{
+ struct btrfs_root *quota_root;
+ struct btrfs_qgroup *qgroup;
+ int ret = 0;
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = add_qgroup_item(trans, quota_root, qgroupid);
+
+ spin_lock(&fs_info->qgroup_lock);
+ qgroup = add_qgroup_rb(fs_info, qgroupid);
+ spin_unlock(&fs_info->qgroup_lock);
+
+ if (IS_ERR(qgroup))
+ ret = PTR_ERR(qgroup);
+
+ return ret;
+}
+
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid)
+{
+ struct btrfs_root *quota_root;
+ int ret = 0;
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = del_qgroup_item(trans, quota_root, qgroupid);
+
+ spin_lock(&fs_info->qgroup_lock);
+ del_qgroup_rb(quota_root->fs_info, qgroupid);
+
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid,
+ struct btrfs_qgroup_limit *limit)
+{
+ struct btrfs_root *quota_root = fs_info->quota_root;
+ struct btrfs_qgroup *qgroup;
+ int ret = 0;
+
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
+ limit->flags, limit->max_rfer,
+ limit->max_excl, limit->rsv_rfer,
+ limit->rsv_excl);
+ if (ret) {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ printk(KERN_INFO "unable to update quota limit for %llu\n",
+ (unsigned long long)qgroupid);
+ }
+
+ spin_lock(&fs_info->qgroup_lock);
+
+ qgroup = find_qgroup_rb(fs_info, qgroupid);
+ if (!qgroup) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ qgroup->lim_flags = limit->flags;
+ qgroup->max_rfer = limit->max_rfer;
+ qgroup->max_excl = limit->max_excl;
+ qgroup->rsv_rfer = limit->rsv_rfer;
+ qgroup->rsv_excl = limit->rsv_excl;
+
+unlock:
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+
+static void qgroup_dirty(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup *qgroup)
+{
+ if (list_empty(&qgroup->dirty))
+ list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
+}
+
+/*
+ * btrfs_qgroup_record_ref is called when the ref is added or deleted. It puts
+ * the modification into a list that's later used by btrfs_end_transaction to
+ * pass the recorded modifications on to btrfs_qgroup_account_ref.
+ */
+int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_extent_op *extent_op)
+{
+ struct qgroup_update *u;
+
+ BUG_ON(!trans->delayed_ref_elem.seq);
+ u = kmalloc(sizeof(*u), GFP_NOFS);
+ if (!u)
+ return -ENOMEM;
+
+ u->node = node;
+ u->extent_op = extent_op;
+ list_add_tail(&u->list, &trans->qgroup_ref_list);
+
+ return 0;
+}
+
+/*
+ * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
+ * from the fs. First, all roots referencing the extent are searched, and
+ * then the space is accounted accordingly to the different roots. The
+ * accounting algorithm works in 3 steps documented inline.
+ */
+int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_extent_op *extent_op)
+{
+ struct btrfs_key ins;
+ struct btrfs_root *quota_root;
+ u64 ref_root;
+ struct btrfs_qgroup *qgroup;
+ struct ulist_node *unode;
+ struct ulist *roots = NULL;
+ struct ulist *tmp = NULL;
+ struct ulist_iterator uiter;
+ u64 seq;
+ int ret = 0;
+ int sgn;
+
+ if (!fs_info->quota_enabled)
+ return 0;
+
+ BUG_ON(!fs_info->quota_root);
+
+ ins.objectid = node->bytenr;
+ ins.offset = node->num_bytes;
+ ins.type = BTRFS_EXTENT_ITEM_KEY;
+
+ if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
+ struct btrfs_delayed_tree_ref *ref;
+ ref = btrfs_delayed_node_to_tree_ref(node);
+ ref_root = ref->root;
+ } else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
+ node->type == BTRFS_SHARED_DATA_REF_KEY) {
+ struct btrfs_delayed_data_ref *ref;
+ ref = btrfs_delayed_node_to_data_ref(node);
+ ref_root = ref->root;
+ } else {
+ BUG();
+ }
+
+ if (!is_fstree(ref_root)) {
+ /*
+ * non-fs-trees are not being accounted
+ */
+ return 0;
+ }
+
+ switch (node->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+ sgn = 1;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ sgn = -1;
+ break;
+ case BTRFS_UPDATE_DELAYED_HEAD:
+ return 0;
+ default:
+ BUG();
+ }
+
+ /*
+ * the delayed ref sequence number we pass depends on the direction of
+ * the operation. for add operations, we pass (node->seq - 1) to skip
+ * the delayed ref's current sequence number, because we need the state
+ * of the tree before the add operation. for delete operations, we pass
+ * (node->seq) to include the delayed ref's current sequence number,
+ * because we need the state of the tree after the delete operation.
+ */
+ ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
+ sgn > 0 ? node->seq - 1 : node->seq, &roots);
+ if (ret < 0)
+ goto out;
+
+ spin_lock(&fs_info->qgroup_lock);
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ goto unlock;
+
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+ if (!qgroup)
+ goto unlock;
+
+ /*
+ * step 1: for each old ref, visit all nodes once and inc refcnt
+ */
+ tmp = ulist_alloc(GFP_ATOMIC);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ seq = fs_info->qgroup_seq;
+ fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
+
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(roots, &uiter))) {
+ struct ulist_node *tmp_unode;
+ struct ulist_iterator tmp_uiter;
+ struct btrfs_qgroup *qg;
+
+ qg = find_qgroup_rb(fs_info, unode->val);
+ if (!qg)
+ continue;
+
+ ulist_reinit(tmp);
+ /* XXX id not needed */
+ ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+ ULIST_ITER_INIT(&tmp_uiter);
+ while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)tmp_unode->aux;
+ if (qg->refcnt < seq)
+ qg->refcnt = seq + 1;
+ else
+ ++qg->refcnt;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(tmp, glist->group->qgroupid,
+ (unsigned long)glist->group,
+ GFP_ATOMIC);
+ }
+ }
+ }
+
+ /*
+ * step 2: walk from the new root
+ */
+ ulist_reinit(tmp);
+ ulist_add(tmp, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(tmp, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)unode->aux;
+ if (qg->refcnt < seq) {
+ /* not visited by step 1 */
+ qg->rfer += sgn * node->num_bytes;
+ qg->rfer_cmpr += sgn * node->num_bytes;
+ if (roots->nnodes == 0) {
+ qg->excl += sgn * node->num_bytes;
+ qg->excl_cmpr += sgn * node->num_bytes;
+ }
+ qgroup_dirty(fs_info, qg);
+ }
+ WARN_ON(qg->tag >= seq);
+ qg->tag = seq;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(tmp, glist->group->qgroupid,
+ (unsigned long)glist->group, GFP_ATOMIC);
+ }
+ }
+
+ /*
+ * step 3: walk again from old refs
+ */
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(roots, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct ulist_node *tmp_unode;
+ struct ulist_iterator tmp_uiter;
+
+ qg = find_qgroup_rb(fs_info, unode->val);
+ if (!qg)
+ continue;
+
+ ulist_reinit(tmp);
+ ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+ ULIST_ITER_INIT(&tmp_uiter);
+ while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)tmp_unode->aux;
+ if (qg->tag == seq)
+ continue;
+
+ if (qg->refcnt - seq == roots->nnodes) {
+ qg->excl -= sgn * node->num_bytes;
+ qg->excl_cmpr -= sgn * node->num_bytes;
+ qgroup_dirty(fs_info, qg);
+ }
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(tmp, glist->group->qgroupid,
+ (unsigned long)glist->group,
+ GFP_ATOMIC);
+ }
+ }
+ }
+ ret = 0;
+unlock:
+ spin_unlock(&fs_info->qgroup_lock);
+out:
+ ulist_free(roots);
+ ulist_free(tmp);
+
+ return ret;
+}
+
+/*
+ * called from commit_transaction. Writes all changed qgroups to disk.
+ */
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *quota_root = fs_info->quota_root;
+ int ret = 0;
+
+ if (!quota_root)
+ goto out;
+
+ fs_info->quota_enabled = fs_info->pending_quota_state;
+
+ spin_lock(&fs_info->qgroup_lock);
+ while (!list_empty(&fs_info->dirty_qgroups)) {
+ struct btrfs_qgroup *qgroup;
+ qgroup = list_first_entry(&fs_info->dirty_qgroups,
+ struct btrfs_qgroup, dirty);
+ list_del_init(&qgroup->dirty);
+ spin_unlock(&fs_info->qgroup_lock);
+ ret = update_qgroup_info_item(trans, quota_root, qgroup);
+ if (ret)
+ fs_info->qgroup_flags |=
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ spin_lock(&fs_info->qgroup_lock);
+ }
+ if (fs_info->quota_enabled)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
+ else
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
+ spin_unlock(&fs_info->qgroup_lock);
+
+ ret = update_qgroup_status_item(trans, fs_info, quota_root);
+ if (ret)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+
+out:
+
+ return ret;
+}
+
+/*
+ * copy the accounting information between qgroups. This is necessary when a
+ * snapshot or a subvolume is created
+ */
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
+ struct btrfs_qgroup_inherit *inherit)
+{
+ int ret = 0;
+ int i;
+ u64 *i_qgroups;
+ struct btrfs_root *quota_root = fs_info->quota_root;
+ struct btrfs_qgroup *srcgroup;
+ struct btrfs_qgroup *dstgroup;
+ u32 level_size = 0;
+
+ if (!fs_info->quota_enabled)
+ return 0;
+
+ if (!quota_root)
+ return -EINVAL;
+
+ /*
+ * create a tracking group for the subvol itself
+ */
+ ret = add_qgroup_item(trans, quota_root, objectid);
+ if (ret)
+ goto out;
+
+ if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
+ ret = update_qgroup_limit_item(trans, quota_root, objectid,
+ inherit->lim.flags,
+ inherit->lim.max_rfer,
+ inherit->lim.max_excl,
+ inherit->lim.rsv_rfer,
+ inherit->lim.rsv_excl);
+ if (ret)
+ goto out;
+ }
+
+ if (srcid) {
+ struct btrfs_root *srcroot;
+ struct btrfs_key srckey;
+ int srcroot_level;
+
+ srckey.objectid = srcid;
+ srckey.type = BTRFS_ROOT_ITEM_KEY;
+ srckey.offset = (u64)-1;
+ srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
+ if (IS_ERR(srcroot)) {
+ ret = PTR_ERR(srcroot);
+ goto out;
+ }
+
+ rcu_read_lock();
+ srcroot_level = btrfs_header_level(srcroot->node);
+ level_size = btrfs_level_size(srcroot, srcroot_level);
+ rcu_read_unlock();
+ }
+
+ /*
+ * add qgroup to all inherited groups
+ */
+ if (inherit) {
+ i_qgroups = (u64 *)(inherit + 1);
+ for (i = 0; i < inherit->num_qgroups; ++i) {
+ ret = add_qgroup_relation_item(trans, quota_root,
+ objectid, *i_qgroups);
+ if (ret)
+ goto out;
+ ret = add_qgroup_relation_item(trans, quota_root,
+ *i_qgroups, objectid);
+ if (ret)
+ goto out;
+ ++i_qgroups;
+ }
+ }
+
+
+ spin_lock(&fs_info->qgroup_lock);
+
+ dstgroup = add_qgroup_rb(fs_info, objectid);
+ if (!dstgroup)
+ goto unlock;
+
+ if (srcid) {
+ srcgroup = find_qgroup_rb(fs_info, srcid);
+ if (!srcgroup)
+ goto unlock;
+ dstgroup->rfer = srcgroup->rfer - level_size;
+ dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
+ srcgroup->excl = level_size;
+ srcgroup->excl_cmpr = level_size;
+ qgroup_dirty(fs_info, dstgroup);
+ qgroup_dirty(fs_info, srcgroup);
+ }
+
+ if (!inherit)
+ goto unlock;
+
+ i_qgroups = (u64 *)(inherit + 1);
+ for (i = 0; i < inherit->num_qgroups; ++i) {
+ ret = add_relation_rb(quota_root->fs_info, objectid,
+ *i_qgroups);
+ if (ret)
+ goto unlock;
+ ++i_qgroups;
+ }
+
+ for (i = 0; i < inherit->num_ref_copies; ++i) {
+ struct btrfs_qgroup *src;
+ struct btrfs_qgroup *dst;
+
+ src = find_qgroup_rb(fs_info, i_qgroups[0]);
+ dst = find_qgroup_rb(fs_info, i_qgroups[1]);
+
+ if (!src || !dst) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ dst->rfer = src->rfer - level_size;
+ dst->rfer_cmpr = src->rfer_cmpr - level_size;
+ i_qgroups += 2;
+ }
+ for (i = 0; i < inherit->num_excl_copies; ++i) {
+ struct btrfs_qgroup *src;
+ struct btrfs_qgroup *dst;
+
+ src = find_qgroup_rb(fs_info, i_qgroups[0]);
+ dst = find_qgroup_rb(fs_info, i_qgroups[1]);
+
+ if (!src || !dst) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ dst->excl = src->excl + level_size;
+ dst->excl_cmpr = src->excl_cmpr + level_size;
+ i_qgroups += 2;
+ }
+
+unlock:
+ spin_unlock(&fs_info->qgroup_lock);
+out:
+ return ret;
+}
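The arithmetic in the srcid branch above is easier to see with concrete numbers: a fresh snapshot shares every block with its source except one tree node, so the destination inherits the referenced counters minus level_size while the source's exclusive count collapses to level_size. A standalone sketch with made-up values (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t level_size = 16384;            /* byte size of one tree node */
        uint64_t src_rfer = 1048576, src_excl = 1048576;
        uint64_t dst_rfer;

        /* the same assignments btrfs_qgroup_inherit makes for the srcid case */
        dst_rfer = src_rfer - level_size;       /* snapshot shares all but one node */
        src_excl = level_size;                  /* only one node stays exclusive */

        printf("dst rfer=%llu src excl=%llu\n",
               (unsigned long long)dst_rfer, (unsigned long long)src_excl);
        return 0;
}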
+
+/*
+ * reserve some space for a qgroup and all its parents. The reservation takes
+ * place with start_transaction or dealloc_reserve, similar to ENOSPC
+ * accounting. If not enough space is available, EDQUOT is returned.
+ * We assume that the requested space is new for all qgroups.
+ */
+int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
+{
+ struct btrfs_root *quota_root;
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u64 ref_root = root->root_key.objectid;
+ int ret = 0;
+ struct ulist *ulist = NULL;
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+
+ if (!is_fstree(ref_root))
+ return 0;
+
+ if (num_bytes == 0)
+ return 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ goto out;
+
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+ if (!qgroup)
+ goto out;
+
+ /*
+ * in a first step, we check all affected qgroups if any limits would
+ * be exceeded
+ */
+ ulist = ulist_alloc(GFP_ATOMIC);
+ ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(ulist, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)unode->aux;
+
+ if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
+ qg->reserved + qg->rfer + num_bytes >
+ qg->max_rfer)
+ ret = -EDQUOT;
+
+ if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
+ qg->reserved + qg->excl + num_bytes >
+ qg->max_excl)
+ ret = -EDQUOT;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(ulist, glist->group->qgroupid,
+ (unsigned long)glist->group, GFP_ATOMIC);
+ }
+ }
+ if (ret)
+ goto out;
+
+ /*
+ * no limits exceeded, now record the reservation into all qgroups
+ */
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(ulist, &uiter))) {
+ struct btrfs_qgroup *qg;
+
+ qg = (struct btrfs_qgroup *)unode->aux;
+
+ qg->reserved += num_bytes;
+ }
+
+out:
+ spin_unlock(&fs_info->qgroup_lock);
+ ulist_free(ulist);
+
+ return ret;
+}
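The reservation is deliberately two-pass: first every affected qgroup (the group and all ancestors collected via the ulist) is checked against its limits, and only if none would be exceeded is the reservation recorded everywhere, so a failed reserve changes nothing. A standalone sketch of that shape, with a plain array standing in for the ulist walk and max_rfer == 0 meaning "no limit" instead of the lim_flags bits:

#include <stdint.h>
#include <errno.h>
#include <stdio.h>

struct group {
        uint64_t rfer, reserved, max_rfer;      /* max_rfer == 0: no limit */
};

/* reserve num_bytes in every group of the hierarchy, or fail without changes */
static int reserve(struct group *g, int n, uint64_t num_bytes)
{
        int i;

        for (i = 0; i < n; i++)                 /* pass 1: check only */
                if (g[i].max_rfer &&
                    g[i].reserved + g[i].rfer + num_bytes > g[i].max_rfer)
                        return -EDQUOT;
        for (i = 0; i < n; i++)                 /* pass 2: record */
                g[i].reserved += num_bytes;
        return 0;
}

int main(void)
{
        struct group chain[2] = {
                { .rfer = 4096, .reserved = 0, .max_rfer = 1 << 20 },
                { .rfer = 8192, .reserved = 0, .max_rfer = 0 },
        };

        printf("reserve: %d\n", reserve(chain, 2, 65536));
        return 0;
}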
+
+void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
+{
+ struct btrfs_root *quota_root;
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct ulist *ulist = NULL;
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ u64 ref_root = root->root_key.objectid;
+
+ if (!is_fstree(ref_root))
+ return;
+
+ if (num_bytes == 0)
+ return;
+
+ spin_lock(&fs_info->qgroup_lock);
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ goto out;
+
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+ if (!qgroup)
+ goto out;
+
+ ulist = ulist_alloc(GFP_ATOMIC);
+ ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(ulist, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)unode->aux;
+
+ qg->reserved -= num_bytes;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(ulist, glist->group->qgroupid,
+ (unsigned long)glist->group, GFP_ATOMIC);
+ }
+ }
+
+out:
+ spin_unlock(&fs_info->qgroup_lock);
+ ulist_free(ulist);
+}
+
+void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
+{
+ if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
+ return;
+ printk(KERN_ERR "btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %llu\n",
+ trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
+ trans->delayed_ref_elem.seq);
+ BUG();
+}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 646ee21bb03..c5dbd914967 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1239,10 +1239,11 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
- kfree(node);
btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
"for start=%llu while inserting into relocation "
"tree\n");
+ kfree(node);
+ return -EEXIST;
}
list_add_tail(&root->root_list, &rc->reloc_roots);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 24fb8ce4e07..6bb465cca20 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -16,12 +16,55 @@
* Boston, MA 021110-1307, USA.
*/
+#include <linux/uuid.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "print-tree.h"
/*
+ * Read a root item from the tree. In case we detect a root item smaller than
+ * sizeof(root_item), we know it's an old version of the root structure and
+ * initialize all new fields to zero. The same happens if we detect mismatching
+ * generation numbers as then we know the root was once mounted with an older
+ * kernel that was not aware of the root item structure change.
+ */
+void btrfs_read_root_item(struct btrfs_root *root,
+ struct extent_buffer *eb, int slot,
+ struct btrfs_root_item *item)
+{
+ uuid_le uuid;
+ int len;
+ int need_reset = 0;
+
+ len = btrfs_item_size_nr(eb, slot);
+ read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot),
+ min_t(int, len, (int)sizeof(*item)));
+ if (len < sizeof(*item))
+ need_reset = 1;
+ if (!need_reset && btrfs_root_generation(item)
+ != btrfs_root_generation_v2(item)) {
+ if (btrfs_root_generation_v2(item) != 0) {
+ printk(KERN_WARNING "btrfs: mismatching "
+ "generation and generation_v2 "
+ "found in root item. This root "
+ "was probably mounted with an "
+ "older kernel. Resetting all "
+ "new fields.\n");
+ }
+ need_reset = 1;
+ }
+ if (need_reset) {
+ memset(&item->generation_v2, 0,
+ sizeof(*item) - offsetof(struct btrfs_root_item,
+ generation_v2));
+
+ uuid_le_gen(&uuid);
+ memcpy(item->uuid, uuid.b, BTRFS_UUID_SIZE);
+ }
+}
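The compatibility rule is: if the on-disk item is shorter than the current structure, or generation and generation_v2 disagree, everything from generation_v2 onwards is treated as stale and zeroed. A reduced standalone sketch of that decision; the struct here is a simplified stand-in, not the real on-disk layout:

#include <stdint.h>
#include <string.h>
#include <stddef.h>

struct root_item {                      /* simplified stand-in */
        uint64_t generation;
        /* everything below was added later and may be missing/stale on disk */
        uint64_t generation_v2;
        uint8_t  uuid[16];
        uint64_t ctransid;
};

static void read_root_item(struct root_item *item, size_t on_disk_len)
{
        int need_reset = on_disk_len < sizeof(*item) ||
                         item->generation != item->generation_v2;

        if (need_reset)                 /* old or stale item: wipe the new fields */
                memset(&item->generation_v2, 0,
                       sizeof(*item) - offsetof(struct root_item, generation_v2));
}

int main(void)
{
        struct root_item it = { .generation = 7, .generation_v2 = 0 };

        read_root_item(&it, sizeof(it));        /* mismatch: v2 fields get zeroed */
        return (int)it.generation_v2;           /* 0 */
}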
+
+/*
* lookup the root with the highest offset for a given objectid. The key we do
* find is copied into 'key'. If we find something return 0, otherwise 1, < 0
* on error.
@@ -61,10 +104,10 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
goto out;
}
if (item)
- read_extent_buffer(l, item, btrfs_item_ptr_offset(l, slot),
- sizeof(*item));
+ btrfs_read_root_item(root, l, slot, item);
if (key)
memcpy(key, &found_key, sizeof(found_key));
+
ret = 0;
out:
btrfs_free_path(path);
@@ -91,16 +134,15 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
int ret;
int slot;
unsigned long ptr;
+ int old_len;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
- goto out;
- }
+ if (ret < 0)
+ goto out_abort;
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
@@ -113,16 +155,56 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
l = path->nodes[0];
slot = path->slots[0];
ptr = btrfs_item_ptr_offset(l, slot);
+ old_len = btrfs_item_size_nr(l, slot);
+
+ /*
+ * If this is the first time we update the root item which originated
+ * from an older kernel, we need to enlarge the item size to make room
+ * for the added fields.
+ */
+ if (old_len < sizeof(*item)) {
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(trans, root, key, path,
+ -1, 1);
+ if (ret < 0)
+ goto out_abort;
+ ret = btrfs_del_item(trans, root, path);
+ if (ret < 0)
+ goto out_abort;
+ btrfs_release_path(path);
+ ret = btrfs_insert_empty_item(trans, root, path,
+ key, sizeof(*item));
+ if (ret < 0)
+ goto out_abort;
+ l = path->nodes[0];
+ slot = path->slots[0];
+ ptr = btrfs_item_ptr_offset(l, slot);
+ }
+
+ /*
+ * Update generation_v2 so at the next mount we know the new root
+ * fields are valid.
+ */
+ btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
+
write_extent_buffer(l, item, ptr, sizeof(*item));
btrfs_mark_buffer_dirty(path->nodes[0]);
out:
btrfs_free_path(path);
return ret;
+
+out_abort:
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
}
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_key *key, struct btrfs_root_item *item)
{
+ /*
+ * Make sure generation v1 and v2 match. See update_root for details.
+ */
+ btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
return btrfs_insert_item(trans, root, key, item, sizeof(*item));
}
@@ -454,3 +536,16 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
root_item->byte_limit = 0;
}
}
+
+void btrfs_update_root_times(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_root_item *item = &root->root_item;
+ struct timespec ct = CURRENT_TIME;
+
+ spin_lock(&root->root_times_lock);
+ item->ctransid = trans->transid;
+ item->ctime.sec = cpu_to_le64(ct.tv_sec);
+ item->ctime.nsec = cpu_to_le64(ct.tv_nsec);
+ spin_unlock(&root->root_times_lock);
+}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
new file mode 100644
index 00000000000..c8ca49b1bb4
--- /dev/null
+++ b/fs/btrfs/send.c
@@ -0,0 +1,4571 @@
+/*
+ * Copyright (C) 2012 Alexander Block. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/bsearch.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/sort.h>
+#include <linux/mount.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/radix-tree.h>
+#include <linux/crc32c.h>
+
+#include "send.h"
+#include "backref.h"
+#include "locking.h"
+#include "disk-io.h"
+#include "btrfs_inode.h"
+#include "transaction.h"
+
+static int g_verbose = 0;
+
+#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
+
+/*
+ * A fs_path is a helper to dynamically build path names with unknown size.
+ * It reallocates the internal buffer on demand.
+ * It allows fast adding of path elements on the right side (normal path) and
+ * fast adding to the left side (reversed path). A reversed path can also be
+ * unreversed if needed.
+ */
+struct fs_path {
+ union {
+ struct {
+ char *start;
+ char *end;
+ char *prepared;
+
+ char *buf;
+ int buf_len;
+ int reversed:1;
+ int virtual_mem:1;
+ char inline_buf[];
+ };
+ char pad[PAGE_SIZE];
+ };
+};
+#define FS_PATH_INLINE_SIZE \
+ (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
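The union pads the structure to one page, so the inline buffer is simply whatever space remains after the bookkeeping fields; only longer paths fall back to a heap buffer. A quick standalone check of that layout trick, with PAGE_SIZE assumed to be 4096 (the anonymous struct and flexible array inside a union need GCC, as in the kernel):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096                  /* assumed page size for the sketch */

struct fs_path_sketch {
        union {
                struct {
                        char *start, *end, *prepared, *buf;
                        int buf_len;
                        unsigned int reversed:1, virtual_mem:1;
                        char inline_buf[];
                };
                char pad[PAGE_SIZE];
        };
};

int main(void)
{
        printf("sizeof = %zu, inline buffer = %zu bytes\n",
               sizeof(struct fs_path_sketch),
               sizeof(struct fs_path_sketch) -
               offsetof(struct fs_path_sketch, inline_buf));
        return 0;
}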
+
+
+/* reused for each extent */
+struct clone_root {
+ struct btrfs_root *root;
+ u64 ino;
+ u64 offset;
+
+ u64 found_refs;
+};
+
+#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
+#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
+
+struct send_ctx {
+ struct file *send_filp;
+ loff_t send_off;
+ char *send_buf;
+ u32 send_size;
+ u32 send_max_size;
+ u64 total_send_size;
+ u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
+
+ struct vfsmount *mnt;
+
+ struct btrfs_root *send_root;
+ struct btrfs_root *parent_root;
+ struct clone_root *clone_roots;
+ int clone_roots_cnt;
+
+ /* current state of the compare_tree call */
+ struct btrfs_path *left_path;
+ struct btrfs_path *right_path;
+ struct btrfs_key *cmp_key;
+
+ /*
+ * info about the currently processed inode. In case of deleted inodes,
+ * these are the values from the deleted inode.
+ */
+ u64 cur_ino;
+ u64 cur_inode_gen;
+ int cur_inode_new;
+ int cur_inode_new_gen;
+ int cur_inode_deleted;
+ int cur_inode_first_ref_orphan;
+ u64 cur_inode_size;
+ u64 cur_inode_mode;
+
+ u64 send_progress;
+
+ struct list_head new_refs;
+ struct list_head deleted_refs;
+
+ struct radix_tree_root name_cache;
+ struct list_head name_cache_list;
+ int name_cache_size;
+
+ struct file *cur_inode_filp;
+ char *read_buf;
+};
+
+struct name_cache_entry {
+ struct list_head list;
+ struct list_head use_list;
+ u64 ino;
+ u64 gen;
+ u64 parent_ino;
+ u64 parent_gen;
+ int ret;
+ int need_later_update;
+ int name_len;
+ char name[];
+};
+
+static void fs_path_reset(struct fs_path *p)
+{
+ if (p->reversed) {
+ p->start = p->buf + p->buf_len - 1;
+ p->end = p->start;
+ *p->start = 0;
+ } else {
+ p->start = p->buf;
+ p->end = p->start;
+ *p->start = 0;
+ }
+}
+
+static struct fs_path *fs_path_alloc(struct send_ctx *sctx)
+{
+ struct fs_path *p;
+
+ p = kmalloc(sizeof(*p), GFP_NOFS);
+ if (!p)
+ return NULL;
+ p->reversed = 0;
+ p->virtual_mem = 0;
+ p->buf = p->inline_buf;
+ p->buf_len = FS_PATH_INLINE_SIZE;
+ fs_path_reset(p);
+ return p;
+}
+
+static struct fs_path *fs_path_alloc_reversed(struct send_ctx *sctx)
+{
+ struct fs_path *p;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return NULL;
+ p->reversed = 1;
+ fs_path_reset(p);
+ return p;
+}
+
+static void fs_path_free(struct send_ctx *sctx, struct fs_path *p)
+{
+ if (!p)
+ return;
+ if (p->buf != p->inline_buf) {
+ if (p->virtual_mem)
+ vfree(p->buf);
+ else
+ kfree(p->buf);
+ }
+ kfree(p);
+}
+
+static int fs_path_len(struct fs_path *p)
+{
+ return p->end - p->start;
+}
+
+static int fs_path_ensure_buf(struct fs_path *p, int len)
+{
+ char *tmp_buf;
+ int path_len;
+ int old_buf_len;
+
+ len++;
+
+ if (p->buf_len >= len)
+ return 0;
+
+ path_len = p->end - p->start;
+ old_buf_len = p->buf_len;
+ len = PAGE_ALIGN(len);
+
+ if (p->buf == p->inline_buf) {
+ tmp_buf = kmalloc(len, GFP_NOFS);
+ if (!tmp_buf) {
+ tmp_buf = vmalloc(len);
+ if (!tmp_buf)
+ return -ENOMEM;
+ p->virtual_mem = 1;
+ }
+ memcpy(tmp_buf, p->buf, p->buf_len);
+ p->buf = tmp_buf;
+ p->buf_len = len;
+ } else {
+ if (p->virtual_mem) {
+ tmp_buf = vmalloc(len);
+ if (!tmp_buf)
+ return -ENOMEM;
+ memcpy(tmp_buf, p->buf, p->buf_len);
+ vfree(p->buf);
+ } else {
+ tmp_buf = krealloc(p->buf, len, GFP_NOFS);
+ if (!tmp_buf) {
+ tmp_buf = vmalloc(len);
+ if (!tmp_buf)
+ return -ENOMEM;
+ memcpy(tmp_buf, p->buf, p->buf_len);
+ kfree(p->buf);
+ p->virtual_mem = 1;
+ }
+ }
+ p->buf = tmp_buf;
+ p->buf_len = len;
+ }
+ if (p->reversed) {
+ tmp_buf = p->buf + old_buf_len - path_len - 1;
+ p->end = p->buf + p->buf_len - 1;
+ p->start = p->end - path_len;
+ memmove(p->start, tmp_buf, path_len + 1);
+ } else {
+ p->start = p->buf;
+ p->end = p->start + path_len;
+ }
+ return 0;
+}
+
+static int fs_path_prepare_for_add(struct fs_path *p, int name_len)
+{
+ int ret;
+ int new_len;
+
+ new_len = p->end - p->start + name_len;
+ if (p->start != p->end)
+ new_len++;
+ ret = fs_path_ensure_buf(p, new_len);
+ if (ret < 0)
+ goto out;
+
+ if (p->reversed) {
+ if (p->start != p->end)
+ *--p->start = '/';
+ p->start -= name_len;
+ p->prepared = p->start;
+ } else {
+ if (p->start != p->end)
+ *p->end++ = '/';
+ p->prepared = p->end;
+ p->end += name_len;
+ *p->end = 0;
+ }
+
+out:
+ return ret;
+}
+
+static int fs_path_add(struct fs_path *p, const char *name, int name_len)
+{
+ int ret;
+
+ ret = fs_path_prepare_for_add(p, name_len);
+ if (ret < 0)
+ goto out;
+ memcpy(p->prepared, name, name_len);
+ p->prepared = NULL;
+
+out:
+ return ret;
+}
+
+static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
+{
+ int ret;
+
+ ret = fs_path_prepare_for_add(p, p2->end - p2->start);
+ if (ret < 0)
+ goto out;
+ memcpy(p->prepared, p2->start, p2->end - p2->start);
+ p->prepared = NULL;
+
+out:
+ return ret;
+}
+
+static int fs_path_add_from_extent_buffer(struct fs_path *p,
+ struct extent_buffer *eb,
+ unsigned long off, int len)
+{
+ int ret;
+
+ ret = fs_path_prepare_for_add(p, len);
+ if (ret < 0)
+ goto out;
+
+ read_extent_buffer(eb, p->prepared, off, len);
+ p->prepared = NULL;
+
+out:
+ return ret;
+}
+
+static void fs_path_remove(struct fs_path *p)
+{
+ BUG_ON(p->reversed);
+ while (p->start != p->end && *p->end != '/')
+ p->end--;
+ *p->end = 0;
+}
+
+static int fs_path_copy(struct fs_path *p, struct fs_path *from)
+{
+ int ret;
+
+ p->reversed = from->reversed;
+ fs_path_reset(p);
+
+ ret = fs_path_add_path(p, from);
+
+ return ret;
+}
+
+
+static void fs_path_unreverse(struct fs_path *p)
+{
+ char *tmp;
+ int len;
+
+ if (!p->reversed)
+ return;
+
+ tmp = p->start;
+ len = p->end - p->start;
+ p->start = p->buf;
+ p->end = p->start + len;
+ memmove(p->start, tmp, len + 1);
+ p->reversed = 0;
+}
+
+static struct btrfs_path *alloc_path_for_send(void)
+{
+ struct btrfs_path *path;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return NULL;
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
+ return path;
+}
+
+static int write_buf(struct send_ctx *sctx, const void *buf, u32 len)
+{
+ int ret;
+ mm_segment_t old_fs;
+ u32 pos = 0;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ while (pos < len) {
+ ret = vfs_write(sctx->send_filp, (char *)buf + pos, len - pos,
+ &sctx->send_off);
+ /* TODO handle that correctly */
+ /*if (ret == -ERESTARTSYS) {
+ continue;
+ }*/
+ if (ret < 0)
+ goto out;
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ pos += ret;
+ }
+
+ ret = 0;
+
+out:
+ set_fs(old_fs);
+ return ret;
+}
+
+static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
+{
+ struct btrfs_tlv_header *hdr;
+ int total_len = sizeof(*hdr) + len;
+ int left = sctx->send_max_size - sctx->send_size;
+
+ if (unlikely(left < total_len))
+ return -EOVERFLOW;
+
+ hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
+ hdr->tlv_type = cpu_to_le16(attr);
+ hdr->tlv_len = cpu_to_le16(len);
+ memcpy(hdr + 1, data, len);
+ sctx->send_size += total_len;
+
+ return 0;
+}
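Each attribute is serialized as a small type/length header followed by the raw payload, appended back to back into send_buf. A standalone sketch of that layout; the header struct is a simplified host-endian stand-in for btrfs_tlv_header and the attribute ids are arbitrary:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct tlv_header {                     /* simplified stand-in, host-endian */
        uint16_t tlv_type;
        uint16_t tlv_len;
} __attribute__((packed));

static int tlv_append(char *buf, size_t *pos, size_t max,
                      uint16_t type, const void *data, uint16_t len)
{
        struct tlv_header hdr = { .tlv_type = type, .tlv_len = len };

        if (*pos + sizeof(hdr) + len > max)
                return -1;              /* the real code returns -EOVERFLOW */
        memcpy(buf + *pos, &hdr, sizeof(hdr));
        memcpy(buf + *pos + sizeof(hdr), data, len);
        *pos += sizeof(hdr) + len;
        return 0;
}

int main(void)
{
        char buf[256];
        size_t pos = 0;
        uint64_t size = 4096;

        tlv_append(buf, &pos, sizeof(buf), 5, &size, sizeof(size));    /* u64 attr */
        tlv_append(buf, &pos, sizeof(buf), 15, "foo/bar", 7);          /* path attr */
        printf("attribute area is %zu bytes\n", pos);   /* 4+8 + 4+7 = 23 */
        return 0;
}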
+
+#if 0
+static int tlv_put_u8(struct send_ctx *sctx, u16 attr, u8 value)
+{
+ return tlv_put(sctx, attr, &value, sizeof(value));
+}
+
+static int tlv_put_u16(struct send_ctx *sctx, u16 attr, u16 value)
+{
+ __le16 tmp = cpu_to_le16(value);
+ return tlv_put(sctx, attr, &tmp, sizeof(tmp));
+}
+
+static int tlv_put_u32(struct send_ctx *sctx, u16 attr, u32 value)
+{
+ __le32 tmp = cpu_to_le32(value);
+ return tlv_put(sctx, attr, &tmp, sizeof(tmp));
+}
+#endif
+
+static int tlv_put_u64(struct send_ctx *sctx, u16 attr, u64 value)
+{
+ __le64 tmp = cpu_to_le64(value);
+ return tlv_put(sctx, attr, &tmp, sizeof(tmp));
+}
+
+static int tlv_put_string(struct send_ctx *sctx, u16 attr,
+ const char *str, int len)
+{
+ if (len == -1)
+ len = strlen(str);
+ return tlv_put(sctx, attr, str, len);
+}
+
+static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
+ const u8 *uuid)
+{
+ return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
+}
+
+#if 0
+static int tlv_put_timespec(struct send_ctx *sctx, u16 attr,
+ struct timespec *ts)
+{
+ struct btrfs_timespec bts;
+ bts.sec = cpu_to_le64(ts->tv_sec);
+ bts.nsec = cpu_to_le32(ts->tv_nsec);
+ return tlv_put(sctx, attr, &bts, sizeof(bts));
+}
+#endif
+
+static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
+ struct extent_buffer *eb,
+ struct btrfs_timespec *ts)
+{
+ struct btrfs_timespec bts;
+ read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
+ return tlv_put(sctx, attr, &bts, sizeof(bts));
+}
+
+
+#define TLV_PUT(sctx, attrtype, attrlen, data) \
+ do { \
+ ret = tlv_put(sctx, attrtype, attrlen, data); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+
+#define TLV_PUT_INT(sctx, attrtype, bits, value) \
+ do { \
+ ret = tlv_put_u##bits(sctx, attrtype, value); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+
+#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
+#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
+#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
+#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
+#define TLV_PUT_STRING(sctx, attrtype, str, len) \
+ do { \
+ ret = tlv_put_string(sctx, attrtype, str, len); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+#define TLV_PUT_PATH(sctx, attrtype, p) \
+ do { \
+ ret = tlv_put_string(sctx, attrtype, p->start, \
+ p->end - p->start); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while(0)
+#define TLV_PUT_UUID(sctx, attrtype, uuid) \
+ do { \
+ ret = tlv_put_uuid(sctx, attrtype, uuid); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+#define TLV_PUT_TIMESPEC(sctx, attrtype, ts) \
+ do { \
+ ret = tlv_put_timespec(sctx, attrtype, ts); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
+ do { \
+ ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+
+static int send_header(struct send_ctx *sctx)
+{
+ struct btrfs_stream_header hdr;
+
+ strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
+ hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
+
+ return write_buf(sctx, &hdr, sizeof(hdr));
+}
+
+/*
+ * For each command/item we want to send to userspace, we call this function.
+ */
+static int begin_cmd(struct send_ctx *sctx, int cmd)
+{
+ struct btrfs_cmd_header *hdr;
+
+ if (!sctx->send_buf) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ BUG_ON(sctx->send_size);
+
+ sctx->send_size += sizeof(*hdr);
+ hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+ hdr->cmd = cpu_to_le16(cmd);
+
+ return 0;
+}
+
+static int send_cmd(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_cmd_header *hdr;
+ u32 crc;
+
+ hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+ hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
+ hdr->crc = 0;
+
+ crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+ hdr->crc = cpu_to_le32(crc);
+
+ ret = write_buf(sctx, sctx->send_buf, sctx->send_size);
+
+ sctx->total_send_size += sctx->send_size;
+ sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
+ sctx->send_size = 0;
+
+ return ret;
+}
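A complete command on the wire is therefore a command header (length of the attribute area, command id, CRC) followed by the TLV attributes, and the CRC is computed over the whole command with the crc field zeroed first. Sketch of that framing step; the header layout is a simplified stand-in and crc32c_stub is only a placeholder for the kernel's crc32c():

#include <stdint.h>
#include <string.h>

struct cmd_header {                     /* simplified stand-in, host-endian */
        uint32_t len;                   /* bytes of TLV data after the header */
        uint16_t cmd;
        uint32_t crc;                   /* crc of the whole command, crc field = 0 */
} __attribute__((packed));

/* placeholder checksum; the kernel uses crc32c() from the crc32c library */
static uint32_t crc32c_stub(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint32_t sum = 0;

        while (len--)
                sum = sum * 31 + *p++;
        return sum;
}

/* finish one command in buf[0..size): fill len and crc the way send_cmd does */
static void finish_cmd(char *buf, uint32_t size, uint16_t cmd)
{
        struct cmd_header *hdr = (struct cmd_header *)buf;

        hdr->len = size - sizeof(*hdr);
        hdr->cmd = cmd;
        hdr->crc = 0;                   /* checksum covers the zeroed field */
        hdr->crc = crc32c_stub(buf, size);
}

int main(void)
{
        char buf[64] = { 0 };

        finish_cmd(buf, sizeof(struct cmd_header) + 12, 1);
        return 0;
}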
+
+/*
+ * Sends a move instruction to user space
+ */
+static int send_rename(struct send_ctx *sctx,
+ struct fs_path *from, struct fs_path *to)
+{
+ int ret;
+
+verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+/*
+ * Sends a link instruction to user space
+ */
+static int send_link(struct send_ctx *sctx,
+ struct fs_path *path, struct fs_path *lnk)
+{
+ int ret;
+
+verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+/*
+ * Sends an unlink instruction to user space
+ */
+static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
+{
+ int ret;
+
+verbose_printk("btrfs: send_unlink %s\n", path->start);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+/*
+ * Sends a rmdir instruction to user space
+ */
+static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
+{
+ int ret;
+
+verbose_printk("btrfs: send_rmdir %s\n", path->start);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+/*
+ * Helper function to retrieve some fields from an inode item.
+ */
+static int get_inode_info(struct btrfs_root *root,
+ u64 ino, u64 *size, u64 *gen,
+ u64 *mode, u64 *uid, u64 *gid)
+{
+ int ret;
+ struct btrfs_inode_item *ii;
+ struct btrfs_key key;
+ struct btrfs_path *path;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+ if (size)
+ *size = btrfs_inode_size(path->nodes[0], ii);
+ if (gen)
+ *gen = btrfs_inode_generation(path->nodes[0], ii);
+ if (mode)
+ *mode = btrfs_inode_mode(path->nodes[0], ii);
+ if (uid)
+ *uid = btrfs_inode_uid(path->nodes[0], ii);
+ if (gid)
+ *gid = btrfs_inode_gid(path->nodes[0], ii);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
+ struct fs_path *p,
+ void *ctx);
+
+/*
+ * Helper function to iterate the entries in ONE btrfs_inode_ref.
+ * The iterate callback may return a non-zero value to stop iteration. This can
+ * be a negative value for error codes or 1 to simply stop it.
+ *
+ * path must point to the INODE_REF when called.
+ */
+static int iterate_inode_ref(struct send_ctx *sctx,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *found_key, int resolve,
+ iterate_inode_ref_t iterate, void *ctx)
+{
+ struct extent_buffer *eb;
+ struct btrfs_item *item;
+ struct btrfs_inode_ref *iref;
+ struct btrfs_path *tmp_path;
+ struct fs_path *p;
+ u32 cur;
+ u32 len;
+ u32 total;
+ int slot;
+ u32 name_len;
+ char *start;
+ int ret = 0;
+ int num;
+ int index;
+
+ p = fs_path_alloc_reversed(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ tmp_path = alloc_path_for_send();
+ if (!tmp_path) {
+ fs_path_free(sctx, p);
+ return -ENOMEM;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ item = btrfs_item_nr(eb, slot);
+ iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+ cur = 0;
+ len = 0;
+ total = btrfs_item_size(eb, item);
+
+ num = 0;
+ while (cur < total) {
+ fs_path_reset(p);
+
+ name_len = btrfs_inode_ref_name_len(eb, iref);
+ index = btrfs_inode_ref_index(eb, iref);
+ if (resolve) {
+ start = btrfs_iref_to_path(root, tmp_path, iref, eb,
+ found_key->offset, p->buf,
+ p->buf_len);
+ if (IS_ERR(start)) {
+ ret = PTR_ERR(start);
+ goto out;
+ }
+ if (start < p->buf) {
+ /* overflow, try again with a larger buffer */
+ ret = fs_path_ensure_buf(p,
+ p->buf_len + p->buf - start);
+ if (ret < 0)
+ goto out;
+ start = btrfs_iref_to_path(root, tmp_path, iref,
+ eb, found_key->offset, p->buf,
+ p->buf_len);
+ if (IS_ERR(start)) {
+ ret = PTR_ERR(start);
+ goto out;
+ }
+ BUG_ON(start < p->buf);
+ }
+ p->start = start;
+ } else {
+ ret = fs_path_add_from_extent_buffer(p, eb,
+ (unsigned long)(iref + 1), name_len);
+ if (ret < 0)
+ goto out;
+ }
+
+
+ len = sizeof(*iref) + name_len;
+ iref = (struct btrfs_inode_ref *)((char *)iref + len);
+ cur += len;
+
+ ret = iterate(num, found_key->offset, index, p, ctx);
+ if (ret)
+ goto out;
+
+ num++;
+ }
+
+out:
+ btrfs_free_path(tmp_path);
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx);
+
+/*
+ * Helper function to iterate the entries in ONE btrfs_dir_item.
+ * The iterate callback may return a non-zero value to stop iteration. This can
+ * be a negative value for error codes or 1 to simply stop it.
+ *
+ * path must point to the dir item when called.
+ */
+static int iterate_dir_item(struct send_ctx *sctx,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *found_key,
+ iterate_dir_item_t iterate, void *ctx)
+{
+ int ret = 0;
+ struct extent_buffer *eb;
+ struct btrfs_item *item;
+ struct btrfs_dir_item *di;
+ struct btrfs_path *tmp_path = NULL;
+ struct btrfs_key di_key;
+ char *buf = NULL;
+ char *buf2 = NULL;
+ int buf_len;
+ int buf_virtual = 0;
+ u32 name_len;
+ u32 data_len;
+ u32 cur;
+ u32 len;
+ u32 total;
+ int slot;
+ int num;
+ u8 type;
+
+ buf_len = PAGE_SIZE;
+ buf = kmalloc(buf_len, GFP_NOFS);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmp_path = alloc_path_for_send();
+ if (!tmp_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ item = btrfs_item_nr(eb, slot);
+ di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
+ cur = 0;
+ len = 0;
+ total = btrfs_item_size(eb, item);
+
+ num = 0;
+ while (cur < total) {
+ name_len = btrfs_dir_name_len(eb, di);
+ data_len = btrfs_dir_data_len(eb, di);
+ type = btrfs_dir_type(eb, di);
+ btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+
+ if (name_len + data_len > buf_len) {
+ buf_len = PAGE_ALIGN(name_len + data_len);
+ if (buf_virtual) {
+ buf2 = vmalloc(buf_len);
+ if (!buf2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ vfree(buf);
+ } else {
+ buf2 = krealloc(buf, buf_len, GFP_NOFS);
+ if (!buf2) {
+ buf2 = vmalloc(buf_len);
+ if (!buf2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(buf);
+ buf_virtual = 1;
+ }
+ }
+
+ buf = buf2;
+ buf2 = NULL;
+ }
+
+ read_extent_buffer(eb, buf, (unsigned long)(di + 1),
+ name_len + data_len);
+
+ len = sizeof(*di) + name_len + data_len;
+ di = (struct btrfs_dir_item *)((char *)di + len);
+ cur += len;
+
+ ret = iterate(num, &di_key, buf, name_len, buf + name_len,
+ data_len, type, ctx);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ num++;
+ }
+
+out:
+ btrfs_free_path(tmp_path);
+ if (buf_virtual)
+ vfree(buf);
+ else
+ kfree(buf);
+ return ret;
+}
+
+static int __copy_first_ref(int num, u64 dir, int index,
+ struct fs_path *p, void *ctx)
+{
+ int ret;
+ struct fs_path *pt = ctx;
+
+ ret = fs_path_copy(pt, p);
+ if (ret < 0)
+ return ret;
+
+ /* we want the first only */
+ return 1;
+}
+
+/*
+ * Retrieve the first path of an inode. If an inode has more than one
+ * ref/hardlink, this is ignored.
+ */
+static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
+ u64 ino, struct fs_path *path)
+{
+ int ret;
+ struct btrfs_key key, found_key;
+ struct btrfs_path *p;
+
+ p = alloc_path_for_send();
+ if (!p)
+ return -ENOMEM;
+
+ fs_path_reset(path);
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 1;
+ goto out;
+ }
+ btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
+ if (found_key.objectid != ino ||
+ found_key.type != BTRFS_INODE_REF_KEY) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = iterate_inode_ref(sctx, root, p, &found_key, 1,
+ __copy_first_ref, path);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ btrfs_free_path(p);
+ return ret;
+}
+
+struct backref_ctx {
+ struct send_ctx *sctx;
+
+ /* number of total found references */
+ u64 found;
+
+ /*
+ * used for clones found in send_root. clones found behind cur_objectid
+ * and cur_offset are not considered as allowed clones.
+ */
+ u64 cur_objectid;
+ u64 cur_offset;
+
+ /* may be truncated in case it's the last extent in a file */
+ u64 extent_len;
+
+ /* Just to check for bugs in backref resolving */
+ int found_in_send_root;
+};
+
+static int __clone_root_cmp_bsearch(const void *key, const void *elt)
+{
+ u64 root = (u64)key;
+ struct clone_root *cr = (struct clone_root *)elt;
+
+ if (root < cr->root->objectid)
+ return -1;
+ if (root > cr->root->objectid)
+ return 1;
+ return 0;
+}
+
+static int __clone_root_cmp_sort(const void *e1, const void *e2)
+{
+ struct clone_root *cr1 = (struct clone_root *)e1;
+ struct clone_root *cr2 = (struct clone_root *)e2;
+
+ if (cr1->root->objectid < cr2->root->objectid)
+ return -1;
+ if (cr1->root->objectid > cr2->root->objectid)
+ return 1;
+ return 0;
+}
+
+/*
+ * Called for every backref that is found for the current extent.
+ */
+static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
+{
+ struct backref_ctx *bctx = ctx_;
+ struct clone_root *found;
+ int ret;
+ u64 i_size;
+
+ /* First check if the root is in the list of accepted clone sources */
+ found = bsearch((void *)root, bctx->sctx->clone_roots,
+ bctx->sctx->clone_roots_cnt,
+ sizeof(struct clone_root),
+ __clone_root_cmp_bsearch);
+ if (!found)
+ return 0;
+
+ if (found->root == bctx->sctx->send_root &&
+ ino == bctx->cur_objectid &&
+ offset == bctx->cur_offset) {
+ bctx->found_in_send_root = 1;
+ }
+
+ /*
+ * There are inodes that have extents that lie behind its i_size. Don't
+ * accept clones from these extents.
+ */
+ ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (offset + bctx->extent_len > i_size)
+ return 0;
+
+ /*
+ * Make sure we don't consider clones from send_root that are
+ * behind the current inode/offset.
+ */
+ if (found->root == bctx->sctx->send_root) {
+ /*
+ * TODO for the moment we don't accept clones from the inode
+ * that is currently being sent. We may change this when
+ * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
+ * file.
+ */
+ if (ino >= bctx->cur_objectid)
+ return 0;
+ /*if (ino > ctx->cur_objectid)
+ return 0;
+ if (offset + ctx->extent_len > ctx->cur_offset)
+ return 0;*/
+
+ bctx->found++;
+ found->found_refs++;
+ found->ino = ino;
+ found->offset = offset;
+ return 0;
+ }
+
+ bctx->found++;
+ found->found_refs++;
+ if (ino < found->ino) {
+ found->ino = ino;
+ found->offset = offset;
+ } else if (found->ino == ino) {
+ /*
+ * same extent found more than once in the same file.
+ */
+ if (found->offset > offset + bctx->extent_len)
+ found->offset = offset;
+ }
+
+ return 0;
+}
+
+/*
+ * path must point to the extent item when called.
+ */
+static int find_extent_clone(struct send_ctx *sctx,
+ struct btrfs_path *path,
+ u64 ino, u64 data_offset,
+ u64 ino_size,
+ struct clone_root **found)
+{
+ int ret;
+ int extent_type;
+ u64 logical;
+ u64 num_bytes;
+ u64 extent_item_pos;
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *eb = path->nodes[0];
+ struct backref_ctx backref_ctx;
+ struct clone_root *cur_clone_root;
+ struct btrfs_key found_key;
+ struct btrfs_path *tmp_path;
+ u32 i;
+
+ tmp_path = alloc_path_for_send();
+ if (!tmp_path)
+ return -ENOMEM;
+
+ if (data_offset >= ino_size) {
+ /*
+ * There may be extents that lie behind the file's size.
+ * I at least had this in combination with snapshotting while
+ * writing large files.
+ */
+ ret = 0;
+ goto out;
+ }
+
+ fi = btrfs_item_ptr(eb, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_type = btrfs_file_extent_type(eb, fi);
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ num_bytes = btrfs_file_extent_num_bytes(eb, fi);
+ logical = btrfs_file_extent_disk_bytenr(eb, fi);
+ if (logical == 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+ logical += btrfs_file_extent_offset(eb, fi);
+
+ ret = extent_from_logical(sctx->send_root->fs_info,
+ logical, tmp_path, &found_key);
+ btrfs_release_path(tmp_path);
+
+ if (ret < 0)
+ goto out;
+ if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Setup the clone roots.
+ */
+ for (i = 0; i < sctx->clone_roots_cnt; i++) {
+ cur_clone_root = sctx->clone_roots + i;
+ cur_clone_root->ino = (u64)-1;
+ cur_clone_root->offset = 0;
+ cur_clone_root->found_refs = 0;
+ }
+
+ backref_ctx.sctx = sctx;
+ backref_ctx.found = 0;
+ backref_ctx.cur_objectid = ino;
+ backref_ctx.cur_offset = data_offset;
+ backref_ctx.found_in_send_root = 0;
+ backref_ctx.extent_len = num_bytes;
+
+ /*
+ * The last extent of a file may be too large due to page alignment.
+ * We need to adjust extent_len in this case so that the checks in
+ * __iterate_backrefs work.
+ */
+ if (data_offset + num_bytes >= ino_size)
+ backref_ctx.extent_len = ino_size - data_offset;
+
+ /*
+ * Now collect all backrefs.
+ */
+ extent_item_pos = logical - found_key.objectid;
+ ret = iterate_extent_inodes(sctx->send_root->fs_info,
+ found_key.objectid, extent_item_pos, 1,
+ __iterate_backrefs, &backref_ctx);
+ if (ret < 0)
+ goto out;
+
+ if (!backref_ctx.found_in_send_root) {
+ /* found a bug in backref code? */
+ ret = -EIO;
+ printk(KERN_ERR "btrfs: ERROR did not find backref in "
+ "send_root. inode=%llu, offset=%llu, "
+ "logical=%llu\n",
+ ino, data_offset, logical);
+ goto out;
+ }
+
+verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
+ "ino=%llu, "
+ "num_bytes=%llu, logical=%llu\n",
+ data_offset, ino, num_bytes, logical);
+
+ if (!backref_ctx.found)
+ verbose_printk("btrfs: no clones found\n");
+
+ cur_clone_root = NULL;
+ for (i = 0; i < sctx->clone_roots_cnt; i++) {
+ if (sctx->clone_roots[i].found_refs) {
+ if (!cur_clone_root)
+ cur_clone_root = sctx->clone_roots + i;
+ else if (sctx->clone_roots[i].root == sctx->send_root)
+ /* prefer clones from send_root over others */
+ cur_clone_root = sctx->clone_roots + i;
+ break;
+ }
+
+ }
+
+ if (cur_clone_root) {
+ *found = cur_clone_root;
+ ret = 0;
+ } else {
+ ret = -ENOENT;
+ }
+
+out:
+ btrfs_free_path(tmp_path);
+ return ret;
+}
+
+static int read_symlink(struct send_ctx *sctx,
+ struct btrfs_root *root,
+ u64 ino,
+ struct fs_path *dest)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *ei;
+ u8 type;
+ u8 compression;
+ unsigned long off;
+ int len;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ BUG_ON(ret);
+
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ type = btrfs_file_extent_type(path->nodes[0], ei);
+ compression = btrfs_file_extent_compression(path->nodes[0], ei);
+ BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
+ BUG_ON(compression);
+
+ off = btrfs_file_extent_inline_start(ei);
+ len = btrfs_file_extent_inline_len(path->nodes[0], ei);
+
+ ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
+ if (ret < 0)
+ goto out;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Helper function to generate a file name that is unique in the root of
+ * send_root and parent_root. This is used to generate names for orphan inodes.
+ */
+static int gen_unique_name(struct send_ctx *sctx,
+ u64 ino, u64 gen,
+ struct fs_path *dest)
+{
+ int ret = 0;
+ struct btrfs_path *path;
+ struct btrfs_dir_item *di;
+ char tmp[64];
+ int len;
+ u64 idx = 0;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ while (1) {
+ len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu",
+ ino, gen, idx);
+ if (len >= sizeof(tmp)) {
+ /* should really not happen */
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ di = btrfs_lookup_dir_item(NULL, sctx->send_root,
+ path, BTRFS_FIRST_FREE_OBJECTID,
+ tmp, strlen(tmp), 0);
+ btrfs_release_path(path);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ if (di) {
+ /* not unique, try again */
+ idx++;
+ continue;
+ }
+
+ if (!sctx->parent_root) {
+ /* unique */
+ ret = 0;
+ break;
+ }
+
+ di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
+ path, BTRFS_FIRST_FREE_OBJECTID,
+ tmp, strlen(tmp), 0);
+ btrfs_release_path(path);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ if (di) {
+ /* not unique, try again */
+ idx++;
+ continue;
+ }
+ /* unique */
+ break;
+ }
+
+ ret = fs_path_add(dest, tmp, strlen(tmp));
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
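Orphan names are simply "o<ino>-<gen>-<index>", with the index bumped until the candidate collides with nothing in either the send root or the parent root. What the generated candidates look like (standalone, made-up numbers):

#include <stdio.h>

int main(void)
{
        unsigned long long ino = 259, gen = 31;
        unsigned long long idx;
        char tmp[64];

        for (idx = 0; idx < 3; idx++) {
                snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", ino, gen, idx);
                printf("%s\n", tmp);    /* o259-31-0, o259-31-1, o259-31-2 */
        }
        return 0;
}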
+
+enum inode_state {
+ inode_state_no_change,
+ inode_state_will_create,
+ inode_state_did_create,
+ inode_state_will_delete,
+ inode_state_did_delete,
+};
+
+static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ int ret;
+ int left_ret;
+ int right_ret;
+ u64 left_gen;
+ u64 right_gen;
+
+ ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
+ NULL);
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ left_ret = ret;
+
+ if (!sctx->parent_root) {
+ right_ret = -ENOENT;
+ } else {
+ ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
+ NULL, NULL, NULL);
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ right_ret = ret;
+ }
+
+ if (!left_ret && !right_ret) {
+ if (left_gen == gen && right_gen == gen)
+ ret = inode_state_no_change;
+ else if (left_gen == gen) {
+ if (ino < sctx->send_progress)
+ ret = inode_state_did_create;
+ else
+ ret = inode_state_will_create;
+ } else if (right_gen == gen) {
+ if (ino < sctx->send_progress)
+ ret = inode_state_did_delete;
+ else
+ ret = inode_state_will_delete;
+ } else {
+ ret = -ENOENT;
+ }
+ } else if (!left_ret) {
+ if (left_gen == gen) {
+ if (ino < sctx->send_progress)
+ ret = inode_state_did_create;
+ else
+ ret = inode_state_will_create;
+ } else {
+ ret = -ENOENT;
+ }
+ } else if (!right_ret) {
+ if (right_gen == gen) {
+ if (ino < sctx->send_progress)
+ ret = inode_state_did_delete;
+ else
+ ret = inode_state_will_delete;
+ } else {
+ ret = -ENOENT;
+ }
+ } else {
+ ret = -ENOENT;
+ }
+
+out:
+ return ret;
+}
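Stripped of the generation checks, the state machine says: present only in the send snapshot means the inode is created somewhere in this stream, present only in the parent snapshot means it is deleted, and send_progress decides whether that create/delete has already been emitted. A compressed standalone restatement (not the kernel function; generation matching left out):

enum inode_state_sketch {
        st_no_change,
        st_will_create, st_did_create,
        st_will_delete, st_did_delete,
};

/* in_send/in_parent: inode exists in that snapshot; processed: ino < send_progress */
static int cur_inode_state_sketch(int in_send, int in_parent, int processed)
{
        if (in_send && in_parent)
                return st_no_change;
        if (in_send)
                return processed ? st_did_create : st_will_create;
        if (in_parent)
                return processed ? st_did_delete : st_will_delete;
        return -2;                      /* -ENOENT in the real code */
}

int main(void)
{
        return cur_inode_state_sketch(1, 0, 1) == st_did_create ? 0 : 1;
}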
+
+static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ int ret;
+
+ ret = get_cur_inode_state(sctx, ino, gen);
+ if (ret < 0)
+ goto out;
+
+ if (ret == inode_state_no_change ||
+ ret == inode_state_did_create ||
+ ret == inode_state_will_delete)
+ ret = 1;
+ else
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/*
+ * Helper function to lookup a dir item in a dir.
+ */
+static int lookup_dir_item_inode(struct btrfs_root *root,
+ u64 dir, const char *name, int name_len,
+ u64 *found_inode,
+ u8 *found_type)
+{
+ int ret = 0;
+ struct btrfs_dir_item *di;
+ struct btrfs_key key;
+ struct btrfs_path *path;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ di = btrfs_lookup_dir_item(NULL, root, path,
+ dir, name, name_len, 0);
+ if (!di) {
+ ret = -ENOENT;
+ goto out;
+ }
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
+ *found_inode = key.objectid;
+ *found_type = btrfs_dir_type(path->nodes[0], di);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int get_first_ref(struct send_ctx *sctx,
+ struct btrfs_root *root, u64 ino,
+ u64 *dir, u64 *dir_gen, struct fs_path *name)
+{
+ int ret;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_path *path;
+ struct btrfs_inode_ref *iref;
+ int len;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (!ret)
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+ path->slots[0]);
+ if (ret || found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_ref);
+ len = btrfs_inode_ref_name_len(path->nodes[0], iref);
+ ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+ (unsigned long)(iref + 1), len);
+ if (ret < 0)
+ goto out;
+ btrfs_release_path(path);
+
+ ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ *dir = found_key.offset;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int is_first_ref(struct send_ctx *sctx,
+ struct btrfs_root *root,
+ u64 ino, u64 dir,
+ const char *name, int name_len)
+{
+ int ret;
+ struct fs_path *tmp_name;
+ u64 tmp_dir;
+ u64 tmp_dir_gen;
+
+ tmp_name = fs_path_alloc(sctx);
+ if (!tmp_name)
+ return -ENOMEM;
+
+ ret = get_first_ref(sctx, root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
+ if (ret < 0)
+ goto out;
+
+ if (name_len != fs_path_len(tmp_name)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = memcmp(tmp_name->start, name, name_len);
+ if (ret)
+ ret = 0;
+ else
+ ret = 1;
+
+out:
+ fs_path_free(sctx, tmp_name);
+ return ret;
+}
+
+static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
+ const char *name, int name_len,
+ u64 *who_ino, u64 *who_gen)
+{
+ int ret = 0;
+ u64 other_inode = 0;
+ u8 other_type = 0;
+
+ if (!sctx->parent_root)
+ goto out;
+
+ ret = is_inode_existent(sctx, dir, dir_gen);
+ if (ret <= 0)
+ goto out;
+
+ ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
+ &other_inode, &other_type);
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ if (other_inode > sctx->send_progress) {
+ ret = get_inode_info(sctx->parent_root, other_inode, NULL,
+ who_gen, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = 1;
+ *who_ino = other_inode;
+ } else {
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static int did_overwrite_ref(struct send_ctx *sctx,
+ u64 dir, u64 dir_gen,
+ u64 ino, u64 ino_gen,
+ const char *name, int name_len)
+{
+ int ret = 0;
+ u64 gen;
+ u64 ow_inode;
+ u8 other_type;
+
+ if (!sctx->parent_root)
+ goto out;
+
+ ret = is_inode_existent(sctx, dir, dir_gen);
+ if (ret <= 0)
+ goto out;
+
+ /* check if the ref was overwritten by another ref */
+ ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
+ &ow_inode, &other_type);
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ if (ret) {
+ /* was never and will never be overwritten */
+ ret = 0;
+ goto out;
+ }
+
+ ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ if (ow_inode == ino && gen == ino_gen) {
+ ret = 0;
+ goto out;
+ }
+
+ /* we know that it is or will be overwritten. check this now */
+ if (ow_inode < sctx->send_progress)
+ ret = 1;
+ else
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ int ret = 0;
+ struct fs_path *name = NULL;
+ u64 dir;
+ u64 dir_gen;
+
+ if (!sctx->parent_root)
+ goto out;
+
+ name = fs_path_alloc(sctx);
+ if (!name)
+ return -ENOMEM;
+
+ ret = get_first_ref(sctx, sctx->parent_root, ino, &dir, &dir_gen, name);
+ if (ret < 0)
+ goto out;
+
+ ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
+ name->start, fs_path_len(name));
+ if (ret < 0)
+ goto out;
+
+out:
+ fs_path_free(sctx, name);
+ return ret;
+}
+
+static int name_cache_insert(struct send_ctx *sctx,
+ struct name_cache_entry *nce)
+{
+ int ret = 0;
+ struct name_cache_entry **ncea;
+
+ ncea = radix_tree_lookup(&sctx->name_cache, nce->ino);
+ if (ncea) {
+ if (!ncea[0])
+ ncea[0] = nce;
+ else if (!ncea[1])
+ ncea[1] = nce;
+ else
+ BUG();
+ } else {
+ ncea = kmalloc(sizeof(void *) * 2, GFP_NOFS);
+ if (!ncea)
+ return -ENOMEM;
+
+ ncea[0] = nce;
+ ncea[1] = NULL;
+ ret = radix_tree_insert(&sctx->name_cache, nce->ino, ncea);
+ if (ret < 0)
+ return ret;
+ }
+ list_add_tail(&nce->list, &sctx->name_cache_list);
+ sctx->name_cache_size++;
+
+ return ret;
+}
+
+static void name_cache_delete(struct send_ctx *sctx,
+ struct name_cache_entry *nce)
+{
+ struct name_cache_entry **ncea;
+
+ ncea = radix_tree_lookup(&sctx->name_cache, nce->ino);
+ BUG_ON(!ncea);
+
+ if (ncea[0] == nce)
+ ncea[0] = NULL;
+ else if (ncea[1] == nce)
+ ncea[1] = NULL;
+ else
+ BUG();
+
+ if (!ncea[0] && !ncea[1]) {
+ radix_tree_delete(&sctx->name_cache, nce->ino);
+ kfree(ncea);
+ }
+
+ list_del(&nce->list);
+
+ sctx->name_cache_size--;
+}
+
+static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
+ u64 ino, u64 gen)
+{
+ struct name_cache_entry **ncea;
+
+ ncea = radix_tree_lookup(&sctx->name_cache, ino);
+ if (!ncea)
+ return NULL;
+
+ if (ncea[0] && ncea[0]->gen == gen)
+ return ncea[0];
+ else if (ncea[1] && ncea[1]->gen == gen)
+ return ncea[1];
+ return NULL;
+}
+
+static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
+{
+ list_del(&nce->list);
+ list_add_tail(&nce->list, &sctx->name_cache_list);
+}
+
+static void name_cache_clean_unused(struct send_ctx *sctx)
+{
+ struct name_cache_entry *nce;
+
+ if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
+ return;
+
+ while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
+ nce = list_entry(sctx->name_cache_list.next,
+ struct name_cache_entry, list);
+ name_cache_delete(sctx, nce);
+ kfree(nce);
+ }
+}
+
+static void name_cache_free(struct send_ctx *sctx)
+{
+ struct name_cache_entry *nce;
+ struct name_cache_entry *tmp;
+
+ list_for_each_entry_safe(nce, tmp, &sctx->name_cache_list, list) {
+ name_cache_delete(sctx, nce);
+ }
+}
+
+static int __get_cur_name_and_parent(struct send_ctx *sctx,
+ u64 ino, u64 gen,
+ u64 *parent_ino,
+ u64 *parent_gen,
+ struct fs_path *dest)
+{
+ int ret;
+ int nce_ret;
+ struct btrfs_path *path = NULL;
+ struct name_cache_entry *nce = NULL;
+
+ nce = name_cache_search(sctx, ino, gen);
+ if (nce) {
+ if (ino < sctx->send_progress && nce->need_later_update) {
+ name_cache_delete(sctx, nce);
+ kfree(nce);
+ nce = NULL;
+ } else {
+ name_cache_used(sctx, nce);
+ *parent_ino = nce->parent_ino;
+ *parent_gen = nce->parent_gen;
+ ret = fs_path_add(dest, nce->name, nce->name_len);
+ if (ret < 0)
+ goto out;
+ ret = nce->ret;
+ goto out;
+ }
+ }
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ ret = is_inode_existent(sctx, ino, gen);
+ if (ret < 0)
+ goto out;
+
+ if (!ret) {
+ ret = gen_unique_name(sctx, ino, gen, dest);
+ if (ret < 0)
+ goto out;
+ ret = 1;
+ goto out_cache;
+ }
+
+ if (ino < sctx->send_progress)
+ ret = get_first_ref(sctx, sctx->send_root, ino,
+ parent_ino, parent_gen, dest);
+ else
+ ret = get_first_ref(sctx, sctx->parent_root, ino,
+ parent_ino, parent_gen, dest);
+ if (ret < 0)
+ goto out;
+
+ ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
+ dest->start, dest->end - dest->start);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ fs_path_reset(dest);
+ ret = gen_unique_name(sctx, ino, gen, dest);
+ if (ret < 0)
+ goto out;
+ ret = 1;
+ }
+
+out_cache:
+ nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
+ if (!nce) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nce->ino = ino;
+ nce->gen = gen;
+ nce->parent_ino = *parent_ino;
+ nce->parent_gen = *parent_gen;
+ nce->name_len = fs_path_len(dest);
+ nce->ret = ret;
+ strcpy(nce->name, dest->start);
+ memset(&nce->use_list, 0, sizeof(nce->use_list));
+
+ if (ino < sctx->send_progress)
+ nce->need_later_update = 0;
+ else
+ nce->need_later_update = 1;
+
+ nce_ret = name_cache_insert(sctx, nce);
+ if (nce_ret < 0)
+ ret = nce_ret;
+ name_cache_clean_unused(sctx);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Magic happens here. This function returns the first ref to an inode as it
+ * would look like while receiving the stream at this point in time.
+ * We walk the path up to the root. For every inode in between, we check if it
+ * was already processed/sent. If yes, we continue with the parent as found
+ * in send_root. If not, we continue with the parent as found in parent_root.
+ * If we encounter an inode that was deleted at this point in time, we use the
+ * inode's "orphan" name instead of the real name and stop. Same with new inodes
+ * that were not created yet and overwritten inodes/refs.
+ *
+ * When do we have orphan inodes:
+ * 1. When an inode is freshly created and thus no valid refs are available yet
+ * 2. When a directory lost all its refs (deleted) but still has dir items
+ * inside which were not processed yet (pending for move/delete). If anyone
+ * tried to get the path to the dir items, it would get a path inside that
+ * orphan directory.
+ * 3. When an inode is moved around or gets new links, it may overwrite the ref
+ * of an unprocessed inode. If in that case the first ref would be
+ * overwritten, the overwritten inode gets "orphanized". Later when we
+ * process this overwritten inode, it is restored at a new place by moving
+ * the orphan inode.
+ *
+ * sctx->send_progress tells this function at which point in time receiving
+ * would be.
+ */
+static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
+ struct fs_path *dest)
+{
+ int ret = 0;
+ struct fs_path *name = NULL;
+ u64 parent_inode = 0;
+ u64 parent_gen = 0;
+ int stop = 0;
+
+ name = fs_path_alloc(sctx);
+ if (!name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dest->reversed = 1;
+ fs_path_reset(dest);
+
+ while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
+ fs_path_reset(name);
+
+ ret = __get_cur_name_and_parent(sctx, ino, gen,
+ &parent_inode, &parent_gen, name);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ stop = 1;
+
+ ret = fs_path_add_path(dest, name);
+ if (ret < 0)
+ goto out;
+
+ ino = parent_inode;
+ gen = parent_gen;
+ }
+
+out:
+ fs_path_free(sctx, name);
+ if (!ret)
+ fs_path_unreverse(dest);
+ return ret;
+}
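The reconstruction is a walk from the inode up to the subvolume root, prepending one name per level into a right-to-left buffer and reading the result forward at the end. A standalone sketch of just that walk, with a small in-memory parent table standing in for the send_root/parent_root lookups and the name cache:

#include <stdio.h>
#include <string.h>

struct node { unsigned long long ino, parent; const char *name; };

/* toy namespace: 256 plays the role of the subvolume root */
static const struct node tbl[] = {
        { 260, 259, "file.txt" },
        { 259, 257, "b" },
        { 257, 256, "a" },
};

static const struct node *lookup(unsigned long long ino)
{
        size_t i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                if (tbl[i].ino == ino)
                        return &tbl[i];
        return NULL;
}

int main(void)
{
        char buf[256];
        char *start = buf + sizeof(buf) - 1;    /* build right to left */
        unsigned long long ino = 260;
        const struct node *n;

        *start = '\0';
        while ((n = lookup(ino))) {             /* stop at the subvol root */
                size_t len = strlen(n->name);

                if (start != buf + sizeof(buf) - 1)
                        *--start = '/';
                start -= len;
                memcpy(start, n->name, len);
                ino = n->parent;
        }
        printf("%s\n", start);                  /* a/b/file.txt */
        return 0;
}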
+
+/*
+ * Called for regular files when sending extent data. Opens a struct file
+ * to read from the file.
+ */
+static int open_cur_inode_file(struct send_ctx *sctx)
+{
+ int ret = 0;
+ struct btrfs_key key;
+ struct path path;
+ struct inode *inode;
+ struct dentry *dentry;
+ struct file *filp;
+ int new = 0;
+
+ if (sctx->cur_inode_filp)
+ goto out;
+
+ key.objectid = sctx->cur_ino;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root,
+ &new);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ dentry = d_obtain_alias(inode);
+ inode = NULL;
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ path.mnt = sctx->mnt;
+ path.dentry = dentry;
+ filp = dentry_open(&path, O_RDONLY | O_LARGEFILE, current_cred());
+ dput(dentry);
+ dentry = NULL;
+ if (IS_ERR(filp)) {
+ ret = PTR_ERR(filp);
+ goto out;
+ }
+ sctx->cur_inode_filp = filp;
+
+out:
+ /*
+ * no xxxput required here as every vfs op
+ * does it by itself on failure
+ */
+ return ret;
+}
+
+/*
+ * Closes the struct file that was created in open_cur_inode_file
+ */
+static int close_cur_inode_file(struct send_ctx *sctx)
+{
+ int ret = 0;
+
+ if (!sctx->cur_inode_filp)
+ goto out;
+
+ ret = filp_close(sctx->cur_inode_filp, NULL);
+ sctx->cur_inode_filp = NULL;
+
+out:
+ return ret;
+}
+
+/*
+ * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
+ */
+static int send_subvol_begin(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_root *send_root = sctx->send_root;
+ struct btrfs_root *parent_root = sctx->parent_root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_root_ref *ref;
+ struct extent_buffer *leaf;
+ char *name = NULL;
+ int namelen;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
+ if (!name) {
+ btrfs_free_path(path);
+ return -ENOMEM;
+ }
+
+ key.objectid = send_root->objectid;
+ key.type = BTRFS_ROOT_BACKREF_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
+ &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.type != BTRFS_ROOT_BACKREF_KEY ||
+ key.objectid != send_root->objectid) {
+ ret = -ENOENT;
+ goto out;
+ }
+ ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
+ namelen = btrfs_root_ref_name_len(leaf, ref);
+ read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
+ btrfs_release_path(path);
+
+ if (ret < 0)
+ goto out;
+
+ if (parent_root) {
+ ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
+ if (ret < 0)
+ goto out;
+ }
+
+ TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
+ TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
+ sctx->send_root->root_item.uuid);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
+ sctx->send_root->root_item.ctransid);
+ if (parent_root) {
+ TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
+ sctx->parent_root->root_item.uuid);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
+ sctx->parent_root->root_item.ctransid);
+ }
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ btrfs_free_path(path);
+ kfree(name);
+ return ret;
+}
+
+static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, ino, gen, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
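+/*
+ * Sends a BTRFS_SEND_C_CHMOD command. Only the permission bits (07777) of
+ * the mode are transmitted.
+ */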
+static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, ino, gen, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
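+/*
+ * Sends a BTRFS_SEND_C_CHOWN command with the new uid and gid of the inode.
+ */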
+static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, ino, gen, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
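+/*
+ * Sends a BTRFS_SEND_C_UTIMES command. The atime/mtime/ctime values are read
+ * from the inode item in the send root.
+ */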
+static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ int ret = 0;
+ struct fs_path *p = NULL;
+ struct btrfs_inode_item *ii;
+ struct btrfs_path *path = NULL;
+ struct extent_buffer *eb;
+ struct btrfs_key key;
+ int slot;
+
+verbose_printk("btrfs: send_utimes %llu\n", ino);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ path = alloc_path_for_send();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, ino, gen, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
+ btrfs_inode_atime(ii));
+ TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
+ btrfs_inode_mtime(ii));
+ TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
+ btrfs_inode_ctime(ii));
+ /* TODO otime? */
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 + * a valid path yet because we did not process the refs yet. So the inode
 + * is created as an orphan.
+ */
+static int send_create_inode(struct send_ctx *sctx, struct btrfs_path *path,
+ struct btrfs_key *key)
+{
+ int ret = 0;
+ struct extent_buffer *eb = path->nodes[0];
+ struct btrfs_inode_item *ii;
+ struct fs_path *p;
+ int slot = path->slots[0];
+ int cmd;
+ u64 mode;
+
+verbose_printk("btrfs: send_create_inode %llu\n", sctx->cur_ino);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
+ mode = btrfs_inode_mode(eb, ii);
+
+ if (S_ISREG(mode))
+ cmd = BTRFS_SEND_C_MKFILE;
+ else if (S_ISDIR(mode))
+ cmd = BTRFS_SEND_C_MKDIR;
+ else if (S_ISLNK(mode))
+ cmd = BTRFS_SEND_C_SYMLINK;
+ else if (S_ISCHR(mode) || S_ISBLK(mode))
+ cmd = BTRFS_SEND_C_MKNOD;
+ else if (S_ISFIFO(mode))
+ cmd = BTRFS_SEND_C_MKFIFO;
+ else if (S_ISSOCK(mode))
+ cmd = BTRFS_SEND_C_MKSOCK;
+ else {
+		printk(KERN_WARNING "btrfs: unexpected inode type %o\n",
+ (int)(mode & S_IFMT));
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ ret = begin_cmd(sctx, cmd);
+ if (ret < 0)
+ goto out;
+
+ ret = gen_unique_name(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, sctx->cur_ino);
+
+ if (S_ISLNK(mode)) {
+ fs_path_reset(p);
+ ret = read_symlink(sctx, sctx->send_root, sctx->cur_ino, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
+ } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
+ S_ISFIFO(mode) || S_ISSOCK(mode)) {
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, btrfs_inode_rdev(eb, ii));
+ }
+
+ ret = send_cmd(sctx);
+ if (ret < 0)
+ goto out;
+
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+struct recorded_ref {
+ struct list_head list;
+ char *dir_path;
+ char *name;
+ struct fs_path *full_path;
+ u64 dir;
+ u64 dir_gen;
+ int dir_path_len;
+ int name_len;
+};
+
+/*
+ * We need to process new refs before deleted refs, but compare_tree gives us
+ * everything mixed. So we first record all refs and later process them.
+ * This function is a helper to record one ref.
+ */
+static int record_ref(struct list_head *head, u64 dir,
+ u64 dir_gen, struct fs_path *path)
+{
+ struct recorded_ref *ref;
+ char *tmp;
+
+ ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
+ ref->dir = dir;
+ ref->dir_gen = dir_gen;
+ ref->full_path = path;
+
+ tmp = strrchr(ref->full_path->start, '/');
+ if (!tmp) {
+ ref->name_len = ref->full_path->end - ref->full_path->start;
+ ref->name = ref->full_path->start;
+ ref->dir_path_len = 0;
+ ref->dir_path = ref->full_path->start;
+ } else {
+ tmp++;
+ ref->name_len = ref->full_path->end - tmp;
+ ref->name = tmp;
+ ref->dir_path = ref->full_path->start;
+ ref->dir_path_len = ref->full_path->end -
+ ref->full_path->start - 1 - ref->name_len;
+ }
+
+ list_add_tail(&ref->list, head);
+ return 0;
+}
+
+static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head)
+{
+ struct recorded_ref *cur;
+ struct recorded_ref *tmp;
+
+ list_for_each_entry_safe(cur, tmp, head, list) {
+ fs_path_free(sctx, cur->full_path);
+ kfree(cur);
+ }
+ INIT_LIST_HEAD(head);
+}
+
+static void free_recorded_refs(struct send_ctx *sctx)
+{
+ __free_recorded_refs(sctx, &sctx->new_refs);
+ __free_recorded_refs(sctx, &sctx->deleted_refs);
+}
+
+/*
 + * Renames/moves a file/dir to its orphan name. Used when the first
 + * ref of an unprocessed inode gets overwritten and for all non-empty
+ * directories.
+ */
+static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
+ struct fs_path *path)
+{
+ int ret;
+ struct fs_path *orphan;
+
+ orphan = fs_path_alloc(sctx);
+ if (!orphan)
+ return -ENOMEM;
+
+ ret = gen_unique_name(sctx, ino, gen, orphan);
+ if (ret < 0)
+ goto out;
+
+ ret = send_rename(sctx, path, orphan);
+
+out:
+ fs_path_free(sctx, orphan);
+ return ret;
+}
+
+/*
+ * Returns 1 if a directory can be removed at this point in time.
+ * We check this by iterating all dir items and checking if the inode behind
+ * the dir item was already processed.
+ */
+static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress)
+{
+ int ret = 0;
+ struct btrfs_root *root = sctx->parent_root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_key loc;
+ struct btrfs_dir_item *di;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = dir;
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = 0;
+
+ while (1) {
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+ path->slots[0]);
+ }
+ if (ret || found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ break;
+ }
+
+ di = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_dir_item);
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
+
+ if (loc.objectid > send_progress) {
+ ret = 0;
+ goto out;
+ }
+
+ btrfs_release_path(path);
+ key.offset = found_key.offset + 1;
+ }
+
+ ret = 1;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+struct finish_unordered_dir_ctx {
+ struct send_ctx *sctx;
+ struct fs_path *cur_path;
+ struct fs_path *dir_path;
+ u64 dir_ino;
+ int need_delete;
+ int delete_pass;
+};
+
+int __finish_unordered_dir(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret = 0;
+ struct finish_unordered_dir_ctx *fctx = ctx;
+ struct send_ctx *sctx = fctx->sctx;
+ u64 di_gen;
+ u64 di_mode;
+ int is_orphan = 0;
+
+ if (di_key->objectid >= fctx->dir_ino)
+ goto out;
+
+ fs_path_reset(fctx->cur_path);
+
+ ret = get_inode_info(sctx->send_root, di_key->objectid,
+ NULL, &di_gen, &di_mode, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = is_first_ref(sctx, sctx->send_root, di_key->objectid,
+ fctx->dir_ino, name, name_len);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ is_orphan = 1;
+ ret = gen_unique_name(sctx, di_key->objectid, di_gen,
+ fctx->cur_path);
+ } else {
+ ret = get_cur_path(sctx, di_key->objectid, di_gen,
+ fctx->cur_path);
+ }
+ if (ret < 0)
+ goto out;
+
+ ret = fs_path_add(fctx->dir_path, name, name_len);
+ if (ret < 0)
+ goto out;
+
+ if (!fctx->delete_pass) {
+ if (S_ISDIR(di_mode)) {
+ ret = send_rename(sctx, fctx->cur_path,
+ fctx->dir_path);
+ } else {
+ ret = send_link(sctx, fctx->dir_path,
+ fctx->cur_path);
+ if (is_orphan)
+ fctx->need_delete = 1;
+ }
+ } else if (!S_ISDIR(di_mode)) {
+ ret = send_unlink(sctx, fctx->cur_path);
+ } else {
+ ret = 0;
+ }
+
+ fs_path_remove(fctx->dir_path);
+
+out:
+ return ret;
+}
+
+/*
+ * Go through all dir items and see if we find refs which could not be created
+ * in the past because the dir did not exist at that time.
+ */
+static int finish_outoforder_dir(struct send_ctx *sctx, u64 dir, u64 dir_gen)
+{
+ int ret = 0;
+ struct btrfs_path *path = NULL;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct extent_buffer *eb;
+ struct finish_unordered_dir_ctx fctx;
+ int slot;
+
+ path = alloc_path_for_send();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(&fctx, 0, sizeof(fctx));
+ fctx.sctx = sctx;
+ fctx.cur_path = fs_path_alloc(sctx);
+ fctx.dir_path = fs_path_alloc(sctx);
+ if (!fctx.cur_path || !fctx.dir_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fctx.dir_ino = dir;
+
+ ret = get_cur_path(sctx, dir, dir_gen, fctx.dir_path);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * We do two passes. The first links in the new refs and the second
+ * deletes orphans if required. Deletion of orphans is not required for
+ * directory inodes, as we always have only one ref and use rename
+ * instead of link for those.
+ */
+
+again:
+ key.objectid = dir;
+ key.type = BTRFS_DIR_ITEM_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(sctx->send_root, &key, path,
+ 1, 0);
+ if (ret < 0)
+ goto out;
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ btrfs_release_path(path);
+ break;
+ }
+
+ ret = iterate_dir_item(sctx, sctx->send_root, path,
+ &found_key, __finish_unordered_dir,
+ &fctx);
+ if (ret < 0)
+ goto out;
+
+ key.offset = found_key.offset + 1;
+ btrfs_release_path(path);
+ }
+
+ if (!fctx.delete_pass && fctx.need_delete) {
+ fctx.delete_pass = 1;
+ goto again;
+ }
+
+out:
+ btrfs_free_path(path);
+ fs_path_free(sctx, fctx.cur_path);
+ fs_path_free(sctx, fctx.dir_path);
+ return ret;
+}
+
+/*
+ * This does all the move/link/unlink/rmdir magic.
+ */
+static int process_recorded_refs(struct send_ctx *sctx)
+{
+ int ret = 0;
+ struct recorded_ref *cur;
+ struct ulist *check_dirs = NULL;
+ struct ulist_iterator uit;
+ struct ulist_node *un;
+ struct fs_path *valid_path = NULL;
+ u64 ow_inode = 0;
+ u64 ow_gen;
+ int did_overwrite = 0;
+ int is_orphan = 0;
+
+verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
+
+ valid_path = fs_path_alloc(sctx);
+ if (!valid_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ check_dirs = ulist_alloc(GFP_NOFS);
+ if (!check_dirs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * First, check if the first ref of the current inode was overwritten
+ * before. If yes, we know that the current inode was already orphanized
+ * and thus use the orphan name. If not, we can use get_cur_path to
+	 * get the path of the first ref as it would look like while receiving
+	 * at this point in time.
+	 * New inodes are always orphans at the beginning, so force the use of
+	 * the orphan name in this case.
+ * The first ref is stored in valid_path and will be updated if it
+ * gets moved around.
+ */
+ if (!sctx->cur_inode_new) {
+ ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ did_overwrite = 1;
+ }
+ if (sctx->cur_inode_new || did_overwrite) {
+ ret = gen_unique_name(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen, valid_path);
+ if (ret < 0)
+ goto out;
+ is_orphan = 1;
+ } else {
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ valid_path);
+ if (ret < 0)
+ goto out;
+ }
+
+ list_for_each_entry(cur, &sctx->new_refs, list) {
+ /*
+ * Check if this new ref would overwrite the first ref of
+ * another unprocessed inode. If yes, orphanize the
+ * overwritten inode. If we find an overwritten ref that is
+ * not the first ref, simply unlink it.
+ */
+ ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
+ cur->name, cur->name_len,
+ &ow_inode, &ow_gen);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = is_first_ref(sctx, sctx->parent_root,
+ ow_inode, cur->dir, cur->name,
+ cur->name_len);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = orphanize_inode(sctx, ow_inode, ow_gen,
+ cur->full_path);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = send_unlink(sctx, cur->full_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ /*
+ * link/move the ref to the new place. If we have an orphan
+ * inode, move it and update valid_path. If not, link or move
+ * it depending on the inode mode.
+ */
+ if (is_orphan && !sctx->cur_inode_first_ref_orphan) {
+ ret = send_rename(sctx, valid_path, cur->full_path);
+ if (ret < 0)
+ goto out;
+ is_orphan = 0;
+ ret = fs_path_copy(valid_path, cur->full_path);
+ if (ret < 0)
+ goto out;
+ } else {
+ if (S_ISDIR(sctx->cur_inode_mode)) {
+ /*
+ * Dirs can't be linked, so move it. For moved
+ * dirs, we always have one new and one deleted
+ * ref. The deleted ref is ignored later.
+ */
+ ret = send_rename(sctx, valid_path,
+ cur->full_path);
+ if (ret < 0)
+ goto out;
+ ret = fs_path_copy(valid_path, cur->full_path);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = send_link(sctx, cur->full_path,
+ valid_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
+ GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
+ /*
+ * Check if we can already rmdir the directory. If not,
+ * orphanize it. For every dir item inside that gets deleted
+ * later, we do this check again and rmdir it then if possible.
+ * See the use of check_dirs for more details.
+ */
+ ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = send_rmdir(sctx, valid_path);
+ if (ret < 0)
+ goto out;
+ } else if (!is_orphan) {
+ ret = orphanize_inode(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen, valid_path);
+ if (ret < 0)
+ goto out;
+ is_orphan = 1;
+ }
+
+ list_for_each_entry(cur, &sctx->deleted_refs, list) {
+ ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
+ GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ }
+ } else if (!S_ISDIR(sctx->cur_inode_mode)) {
+ /*
+		 * We have a non-dir inode. Go through all deleted refs and
+ * unlink them if they were not already overwritten by other
+ * inodes.
+ */
+ list_for_each_entry(cur, &sctx->deleted_refs, list) {
+ ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
+ sctx->cur_ino, sctx->cur_inode_gen,
+ cur->name, cur->name_len);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ /*
+ * In case the inode was moved to a directory
+ * that was not created yet (see
+ * __record_new_ref), we can not unlink the ref
+ * as it will be needed later when the parent
+			 * directory is created, so that we can move the
+			 * inode into the new dir.
+ */
+ if (!is_orphan &&
+ sctx->cur_inode_first_ref_orphan) {
+ ret = orphanize_inode(sctx,
+ sctx->cur_ino,
+ sctx->cur_inode_gen,
+ cur->full_path);
+ if (ret < 0)
+ goto out;
+ ret = gen_unique_name(sctx,
+ sctx->cur_ino,
+ sctx->cur_inode_gen,
+ valid_path);
+ if (ret < 0)
+ goto out;
+ is_orphan = 1;
+
+ } else {
+ ret = send_unlink(sctx, cur->full_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
+ GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
+		 * If the inode is still an orphan, unlink the orphan. This may
+ * happen when a previous inode did overwrite the first ref
+ * of this inode and no new refs were added for the current
+ * inode.
+		 * We can however not delete the orphan in case the inode resides
+		 * in a directory that was not created yet (see
+		 * __record_new_ref).
+ */
+ if (is_orphan && !sctx->cur_inode_first_ref_orphan) {
+ ret = send_unlink(sctx, valid_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ /*
+ * We did collect all parent dirs where cur_inode was once located. We
+ * now go through all these dirs and check if they are pending for
+ * deletion and if it's finally possible to perform the rmdir now.
+ * We also update the inode stats of the parent dirs here.
+ */
+ ULIST_ITER_INIT(&uit);
+ while ((un = ulist_next(check_dirs, &uit))) {
+ if (un->val > sctx->cur_ino)
+ continue;
+
+ ret = get_cur_inode_state(sctx, un->val, un->aux);
+ if (ret < 0)
+ goto out;
+
+ if (ret == inode_state_did_create ||
+ ret == inode_state_no_change) {
+ /* TODO delayed utimes */
+ ret = send_utimes(sctx, un->val, un->aux);
+ if (ret < 0)
+ goto out;
+ } else if (ret == inode_state_did_delete) {
+ ret = can_rmdir(sctx, un->val, sctx->cur_ino);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = get_cur_path(sctx, un->val, un->aux,
+ valid_path);
+ if (ret < 0)
+ goto out;
+ ret = send_rmdir(sctx, valid_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ }
+
+ /*
+	 * Current inode is now at its new position, so we must increase
+	 * send_progress.
+ */
+ sctx->send_progress = sctx->cur_ino + 1;
+
+ /*
+ * We may have a directory here that has pending refs which could not
+ * be created before (because the dir did not exist before, see
+ * __record_new_ref). finish_outoforder_dir will link/move the pending
+ * refs.
+ */
+ if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_new) {
+ ret = finish_outoforder_dir(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ free_recorded_refs(sctx);
+ ulist_free(check_dirs);
+ fs_path_free(sctx, valid_path);
+ return ret;
+}
+
+static int __record_new_ref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx)
+{
+ int ret = 0;
+ struct send_ctx *sctx = ctx;
+ struct fs_path *p;
+ u64 gen;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * The parent may be non-existent at this point in time. This happens
+	 * if the ino of the parent dir is higher than the current ino. In this
+ * case, we can not process this ref until the parent dir is finally
+ * created. If we reach the parent dir later, process_recorded_refs
+ * will go through all dir items and process the refs that could not be
+ * processed before. In case this is the first ref, we set
+ * cur_inode_first_ref_orphan to 1 to inform process_recorded_refs to
+ * keep an orphan of the inode so that it later can be used for
+ * link/move
+ */
+ ret = is_inode_existent(sctx, dir, gen);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ ret = is_first_ref(sctx, sctx->send_root, sctx->cur_ino, dir,
+ name->start, fs_path_len(name));
+ if (ret < 0)
+ goto out;
+ if (ret)
+ sctx->cur_inode_first_ref_orphan = 1;
+ ret = 0;
+ goto out;
+ }
+
+ ret = get_cur_path(sctx, dir, gen, p);
+ if (ret < 0)
+ goto out;
+ ret = fs_path_add_path(p, name);
+ if (ret < 0)
+ goto out;
+
+ ret = record_ref(&sctx->new_refs, dir, gen, p);
+
+out:
+ if (ret)
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int __record_deleted_ref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx)
+{
+ int ret = 0;
+ struct send_ctx *sctx = ctx;
+ struct fs_path *p;
+ u64 gen;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, dir, gen, p);
+ if (ret < 0)
+ goto out;
+ ret = fs_path_add_path(p, name);
+ if (ret < 0)
+ goto out;
+
+ ret = record_ref(&sctx->deleted_refs, dir, gen, p);
+
+out:
+ if (ret)
+ fs_path_free(sctx, p);
+ return ret;
+}
+
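+/*
+ * Records all refs of the currently compared inode ref item from the send
+ * root, to be processed later by process_recorded_refs.
+ */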
+static int record_new_ref(struct send_ctx *sctx)
+{
+ int ret;
+
+ ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path,
+ sctx->cmp_key, 0, __record_new_ref, sctx);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ return ret;
+}
+
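+/*
+ * Records all refs of the currently compared inode ref item from the parent
+ * root, i.e. refs that got deleted.
+ */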
+static int record_deleted_ref(struct send_ctx *sctx)
+{
+ int ret;
+
+ ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, 0, __record_deleted_ref, sctx);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+struct find_ref_ctx {
+ u64 dir;
+ struct fs_path *name;
+ int found_idx;
+};
+
+static int __find_iref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx_)
+{
+ struct find_ref_ctx *ctx = ctx_;
+
+ if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
+ strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
+ ctx->found_idx = num;
+ return 1;
+ }
+ return 0;
+}
+
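+/*
+ * Searches an inode ref item for the given dir/name combination. Returns the
+ * index of the matching ref or -ENOENT if it is not present.
+ */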
+static int find_iref(struct send_ctx *sctx,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ u64 dir, struct fs_path *name)
+{
+ int ret;
+ struct find_ref_ctx ctx;
+
+ ctx.dir = dir;
+ ctx.name = name;
+ ctx.found_idx = -1;
+
+ ret = iterate_inode_ref(sctx, root, path, key, 0, __find_iref, &ctx);
+ if (ret < 0)
+ return ret;
+
+ if (ctx.found_idx == -1)
+ return -ENOENT;
+
+ return ctx.found_idx;
+}
+
+static int __record_changed_new_ref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+
+ ret = find_iref(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, dir, name);
+ if (ret == -ENOENT)
+ ret = __record_new_ref(num, dir, index, name, sctx);
+ else if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int __record_changed_deleted_ref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+
+ ret = find_iref(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key,
+ dir, name);
+ if (ret == -ENOENT)
+ ret = __record_deleted_ref(num, dir, index, name, sctx);
+ else if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int record_changed_ref(struct send_ctx *sctx)
+{
+ int ret = 0;
+
+ ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path,
+ sctx->cmp_key, 0, __record_changed_new_ref, sctx);
+ if (ret < 0)
+ goto out;
+ ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/*
+ * Record and process all refs at once. Needed when an inode changes the
+ * generation number, which means that it was deleted and recreated.
+ */
+static int process_all_refs(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result cmd)
+{
+ int ret;
+ struct btrfs_root *root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct extent_buffer *eb;
+ int slot;
+ iterate_inode_ref_t cb;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ if (cmd == BTRFS_COMPARE_TREE_NEW) {
+ root = sctx->send_root;
+ cb = __record_new_ref;
+ } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
+ root = sctx->parent_root;
+ cb = __record_deleted_ref;
+ } else {
+ BUG();
+ }
+
+ key.objectid = sctx->cmp_key->objectid;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0) {
+ btrfs_release_path(path);
+ goto out;
+ }
+ if (ret) {
+ btrfs_release_path(path);
+ break;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ btrfs_release_path(path);
+ break;
+ }
+
+ ret = iterate_inode_ref(sctx, sctx->parent_root, path,
+ &found_key, 0, cb, sctx);
+ btrfs_release_path(path);
+ if (ret < 0)
+ goto out;
+
+ key.offset = found_key.offset + 1;
+ }
+
+ ret = process_recorded_refs(sctx);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
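+/*
+ * Sends a BTRFS_SEND_C_SET_XATTR command for the given path, name and data.
+ */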
+static int send_set_xattr(struct send_ctx *sctx,
+ struct fs_path *path,
+ const char *name, int name_len,
+ const char *data, int data_len)
+{
+ int ret = 0;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
+ TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+static int send_remove_xattr(struct send_ctx *sctx,
+ struct fs_path *path,
+ const char *name, int name_len)
+{
+ int ret = 0;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+static int __process_new_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+ struct fs_path *p;
+ posix_acl_xattr_header dummy_acl;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ /*
+	 * This hack is needed because empty acls are stored as zero byte
+	 * data in xattrs. The problem with that is that receiving these zero
+	 * byte acls will fail later. To fix this, we send a dummy acl list
+	 * that only contains the version number and no entries.
+ */
+ if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
+ !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
+ if (data_len == 0) {
+ dummy_acl.a_version =
+ cpu_to_le32(POSIX_ACL_XATTR_VERSION);
+ data = (char *)&dummy_acl;
+ data_len = sizeof(dummy_acl);
+ }
+ }
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
+
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+ struct fs_path *p;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ ret = send_remove_xattr(sctx, p, name, name_len);
+
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int process_new_xattr(struct send_ctx *sctx)
+{
+ int ret = 0;
+
+ ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path,
+ sctx->cmp_key, __process_new_xattr, sctx);
+
+ return ret;
+}
+
+static int process_deleted_xattr(struct send_ctx *sctx)
+{
+ int ret;
+
+ ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, __process_deleted_xattr, sctx);
+
+ return ret;
+}
+
+struct find_xattr_ctx {
+ const char *name;
+ int name_len;
+ int found_idx;
+ char *found_data;
+ int found_data_len;
+};
+
+static int __find_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *vctx)
+{
+ struct find_xattr_ctx *ctx = vctx;
+
+ if (name_len == ctx->name_len &&
+ strncmp(name, ctx->name, name_len) == 0) {
+ ctx->found_idx = num;
+ ctx->found_data_len = data_len;
+ ctx->found_data = kmalloc(data_len, GFP_NOFS);
+ if (!ctx->found_data)
+ return -ENOMEM;
+ memcpy(ctx->found_data, data, data_len);
+ return 1;
+ }
+ return 0;
+}
+
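+/*
+ * Searches a dir item for an xattr with the given name. Returns the item
+ * index on success and, if data is non-NULL, a kmalloc'ed copy of the xattr
+ * data which the caller has to free.
+ */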
+static int find_xattr(struct send_ctx *sctx,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ const char *name, int name_len,
+ char **data, int *data_len)
+{
+ int ret;
+ struct find_xattr_ctx ctx;
+
+ ctx.name = name;
+ ctx.name_len = name_len;
+ ctx.found_idx = -1;
+ ctx.found_data = NULL;
+ ctx.found_data_len = 0;
+
+ ret = iterate_dir_item(sctx, root, path, key, __find_xattr, &ctx);
+ if (ret < 0)
+ return ret;
+
+ if (ctx.found_idx == -1)
+ return -ENOENT;
+ if (data) {
+ *data = ctx.found_data;
+ *data_len = ctx.found_data_len;
+ } else {
+ kfree(ctx.found_data);
+ }
+ return ctx.found_idx;
+}
+
+
+static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+ char *found_data = NULL;
+ int found_data_len = 0;
+ struct fs_path *p = NULL;
+
+ ret = find_xattr(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, name, name_len, &found_data,
+ &found_data_len);
+ if (ret == -ENOENT) {
+ ret = __process_new_xattr(num, di_key, name, name_len, data,
+ data_len, type, ctx);
+ } else if (ret >= 0) {
+ if (data_len != found_data_len ||
+ memcmp(data, found_data, data_len)) {
+ ret = __process_new_xattr(num, di_key, name, name_len,
+ data, data_len, type, ctx);
+ } else {
+ ret = 0;
+ }
+ }
+
+ kfree(found_data);
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+
+ ret = find_xattr(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key,
+ name, name_len, NULL, NULL);
+ if (ret == -ENOENT)
+ ret = __process_deleted_xattr(num, di_key, name, name_len, data,
+ data_len, type, ctx);
+ else if (ret >= 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int process_changed_xattr(struct send_ctx *sctx)
+{
+ int ret = 0;
+
+ ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path,
+ sctx->cmp_key, __process_changed_new_xattr, sctx);
+ if (ret < 0)
+ goto out;
+ ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, __process_changed_deleted_xattr, sctx);
+
+out:
+ return ret;
+}
+
+static int process_all_new_xattrs(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_root *root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct extent_buffer *eb;
+ int slot;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ root = sctx->send_root;
+
+ key.objectid = sctx->cmp_key->objectid;
+ key.type = BTRFS_XATTR_ITEM_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = iterate_dir_item(sctx, root, path, &found_key,
+ __process_new_xattr, sctx);
+ if (ret < 0)
+ goto out;
+
+ btrfs_release_path(path);
+ key.offset = found_key.offset + 1;
+ }
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Read some bytes from the current inode/file and send a write command to
+ * user space.
+ */
+static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
+{
+ int ret = 0;
+ struct fs_path *p;
+ loff_t pos = offset;
+	int num_read = 0;
+ mm_segment_t old_fs;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ /*
+	 * vfs normally only accepts user space buffers for security reasons.
+	 * We only read from the file and only provide the read_buf buffer
+	 * to vfs. As this buffer does not come from a user space call, it's
+	 * ok to temporarily allow kernel space buffers.
+ */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
+
+ ret = open_cur_inode_file(sctx);
+ if (ret < 0)
+ goto out;
+
+ ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos);
+ if (ret < 0)
+ goto out;
+	num_read = ret;
+	if (!num_read)
+ goto out;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ set_fs(old_fs);
+ if (ret < 0)
+ return ret;
+	return num_read;
+}
+
+/*
+ * Send a clone command to user space.
+ */
+static int send_clone(struct send_ctx *sctx,
+ u64 offset, u32 len,
+ struct clone_root *clone_root)
+{
+ int ret = 0;
+ struct btrfs_root *clone_root2 = clone_root->root;
+ struct fs_path *p;
+ u64 gen;
+
+verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
+ "clone_inode=%llu, clone_offset=%llu\n", offset, len,
+ clone_root->root->objectid, clone_root->ino,
+ clone_root->offset);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+
+ if (clone_root2 == sctx->send_root) {
+ ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
+ &gen, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ ret = get_cur_path(sctx, clone_root->ino, gen, p);
+ } else {
+ ret = get_inode_path(sctx, clone_root2, clone_root->ino, p);
+ }
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
+ clone_root2->root_item.uuid);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
+ clone_root2->root_item.ctransid);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
+ clone_root->offset);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
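+/*
+ * Sends the data of a file extent item, either as a series of write commands
+ * (in chunks of at most BTRFS_SEND_READ_SIZE) or as a single clone command
+ * if a suitable clone source was found.
+ */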
+static int send_write_or_clone(struct send_ctx *sctx,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ struct clone_root *clone_root)
+{
+ int ret = 0;
+ struct btrfs_file_extent_item *ei;
+ u64 offset = key->offset;
+ u64 pos = 0;
+ u64 len;
+ u32 l;
+ u8 type;
+
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ type = btrfs_file_extent_type(path->nodes[0], ei);
+ if (type == BTRFS_FILE_EXTENT_INLINE)
+ len = btrfs_file_extent_inline_len(path->nodes[0], ei);
+ else
+ len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
+
+ if (offset + len > sctx->cur_inode_size)
+ len = sctx->cur_inode_size - offset;
+ if (len == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!clone_root) {
+ while (pos < len) {
+ l = len - pos;
+ if (l > BTRFS_SEND_READ_SIZE)
+ l = BTRFS_SEND_READ_SIZE;
+ ret = send_write(sctx, pos + offset, l);
+ if (ret < 0)
+ goto out;
+ if (!ret)
+ break;
+ pos += ret;
+ }
+ ret = 0;
+ } else {
+ ret = send_clone(sctx, offset, len, clone_root);
+ }
+
+out:
+ return ret;
+}
+
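+/*
+ * Checks if the file extent item at ekey in the send root points to the same
+ * disk byte range as the corresponding extents in the parent root. Returns 1
+ * if the extent is unchanged and thus does not need to be sent, 0 otherwise.
+ */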
+static int is_extent_unchanged(struct send_ctx *sctx,
+ struct btrfs_path *left_path,
+ struct btrfs_key *ekey)
+{
+ int ret = 0;
+ struct btrfs_key key;
+ struct btrfs_path *path = NULL;
+ struct extent_buffer *eb;
+ int slot;
+ struct btrfs_key found_key;
+ struct btrfs_file_extent_item *ei;
+ u64 left_disknr;
+ u64 right_disknr;
+ u64 left_offset;
+ u64 right_offset;
+ u64 left_offset_fixed;
+ u64 left_len;
+ u64 right_len;
+ u8 left_type;
+ u8 right_type;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ eb = left_path->nodes[0];
+ slot = left_path->slots[0];
+
+ ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ left_type = btrfs_file_extent_type(eb, ei);
+ left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+ left_len = btrfs_file_extent_num_bytes(eb, ei);
+ left_offset = btrfs_file_extent_offset(eb, ei);
+
+ if (left_type != BTRFS_FILE_EXTENT_REG) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * Following comments will refer to these graphics. L is the left
+ * extents which we are checking at the moment. 1-8 are the right
+ * extents that we iterate.
+ *
+ * |-----L-----|
+ * |-1-|-2a-|-3-|-4-|-5-|-6-|
+ *
+ * |-----L-----|
+ * |--1--|-2b-|...(same as above)
+ *
+ * Alternative situation. Happens on files where extents got split.
+ * |-----L-----|
+ * |-----------7-----------|-6-|
+ *
+ * Alternative situation. Happens on files which got larger.
+ * |-----L-----|
+ * |-8-|
+ * Nothing follows after 8.
+ */
+
+ key.objectid = ekey->objectid;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = ekey->offset;
+ ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * Handle special case where the right side has no extents at all.
+ */
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * We're now on 2a, 2b or 7.
+ */
+ key = found_key;
+ while (key.offset < ekey->offset + left_len) {
+ ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ right_type = btrfs_file_extent_type(eb, ei);
+ right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+ right_len = btrfs_file_extent_num_bytes(eb, ei);
+ right_offset = btrfs_file_extent_offset(eb, ei);
+
+ if (right_type != BTRFS_FILE_EXTENT_REG) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * Are we at extent 8? If yes, we know the extent is changed.
+ * This may only happen on the first iteration.
+ */
+ if (found_key.offset + right_len < ekey->offset) {
+ ret = 0;
+ goto out;
+ }
+
+ left_offset_fixed = left_offset;
+ if (key.offset < ekey->offset) {
+ /* Fix the right offset for 2a and 7. */
+ right_offset += ekey->offset - key.offset;
+ } else {
+ /* Fix the left offset for all behind 2a and 2b */
+ left_offset_fixed += key.offset - ekey->offset;
+ }
+
+ /*
+ * Check if we have the same extent.
+ */
+ if (left_disknr + left_offset_fixed !=
+ right_disknr + right_offset) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * Go to the next extent.
+ */
+ ret = btrfs_next_item(sctx->parent_root, path);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+ }
+ if (ret || found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ key.offset += right_len;
+ break;
+ } else {
+ if (found_key.offset != key.offset + right_len) {
+ /* Should really not happen */
+ ret = -EIO;
+ goto out;
+ }
+ }
+ key = found_key;
+ }
+
+ /*
+ * We're now behind the left extent (treat as unchanged) or at the end
+ * of the right side (treat as changed).
+ */
+ if (key.offset >= ekey->offset + left_len)
+ ret = 1;
+ else
+ ret = 0;
+
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
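+/*
+ * Processes a single file extent item of the current inode. Unchanged
+ * extents are skipped, for all others we try to find a clone source and then
+ * send write or clone commands.
+ */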
+static int process_extent(struct send_ctx *sctx,
+ struct btrfs_path *path,
+ struct btrfs_key *key)
+{
+ int ret = 0;
+ struct clone_root *found_clone = NULL;
+
+ if (S_ISLNK(sctx->cur_inode_mode))
+ return 0;
+
+ if (sctx->parent_root && !sctx->cur_inode_new) {
+ ret = is_extent_unchanged(sctx, path, key);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = find_extent_clone(sctx, path, key->objectid, key->offset,
+ sctx->cur_inode_size, &found_clone);
+ if (ret != -ENOENT && ret < 0)
+ goto out;
+
+ ret = send_write_or_clone(sctx, path, key, found_clone);
+
+out:
+ return ret;
+}
+
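+/*
+ * Sends all file extents of the current inode. Used when an inode was
+ * recreated with a new generation, in which case the whole content has to be
+ * sent again.
+ */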
+static int process_all_extents(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_root *root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct extent_buffer *eb;
+ int slot;
+
+ root = sctx->send_root;
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = sctx->cmp_key->objectid;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = process_extent(sctx, path, &found_key);
+ if (ret < 0)
+ goto out;
+
+ btrfs_release_path(path);
+ key.offset = found_key.offset + 1;
+ }
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
+{
+ int ret = 0;
+
+ if (sctx->cur_ino == 0)
+ goto out;
+ if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
+ sctx->cmp_key->type <= BTRFS_INODE_REF_KEY)
+ goto out;
+ if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
+ goto out;
+
+ ret = process_recorded_refs(sctx);
+
+out:
+ return ret;
+}
+
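+/*
+ * Called when all items of the current inode were processed (or when the
+ * comparison moves on to the next inode). Processes pending recorded refs
+ * and sends truncate, chown, chmod and utimes commands as needed.
+ */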
+static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
+{
+ int ret = 0;
+ u64 left_mode;
+ u64 left_uid;
+ u64 left_gid;
+ u64 right_mode;
+ u64 right_uid;
+ u64 right_gid;
+ int need_chmod = 0;
+ int need_chown = 0;
+
+ ret = process_recorded_refs_if_needed(sctx, at_end);
+ if (ret < 0)
+ goto out;
+
+ if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
+ goto out;
+ if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
+ goto out;
+
+ ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
+ &left_mode, &left_uid, &left_gid);
+ if (ret < 0)
+ goto out;
+
+ if (!S_ISLNK(sctx->cur_inode_mode)) {
+ if (!sctx->parent_root || sctx->cur_inode_new) {
+ need_chmod = 1;
+ need_chown = 1;
+ } else {
+ ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
+ NULL, NULL, &right_mode, &right_uid,
+ &right_gid);
+ if (ret < 0)
+ goto out;
+
+ if (left_uid != right_uid || left_gid != right_gid)
+ need_chown = 1;
+ if (left_mode != right_mode)
+ need_chmod = 1;
+ }
+ }
+
+ if (S_ISREG(sctx->cur_inode_mode)) {
+ ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ sctx->cur_inode_size);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (need_chown) {
+ ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ left_uid, left_gid);
+ if (ret < 0)
+ goto out;
+ }
+ if (need_chmod) {
+ ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ left_mode);
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
+	 * We need to send the utimes every time, no matter if they actually
+	 * changed between the two trees, as we may have changed the inode before.
+ */
+ ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
+}
+
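+/*
+ * Handles a new, deleted or changed inode item. Sets up the cur_inode_*
+ * state in the send context and sends the create command for new inodes.
+ * If the generation changed, the inode was deleted and recreated, so the old
+ * refs are processed as deleted and the new inode is created and fully sent.
+ */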
+static int changed_inode(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result)
+{
+ int ret = 0;
+ struct btrfs_key *key = sctx->cmp_key;
+ struct btrfs_inode_item *left_ii = NULL;
+ struct btrfs_inode_item *right_ii = NULL;
+ u64 left_gen = 0;
+ u64 right_gen = 0;
+
+ ret = close_cur_inode_file(sctx);
+ if (ret < 0)
+ goto out;
+
+ sctx->cur_ino = key->objectid;
+ sctx->cur_inode_new_gen = 0;
+ sctx->cur_inode_first_ref_orphan = 0;
+ sctx->send_progress = sctx->cur_ino;
+
+ if (result == BTRFS_COMPARE_TREE_NEW ||
+ result == BTRFS_COMPARE_TREE_CHANGED) {
+ left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
+ sctx->left_path->slots[0],
+ struct btrfs_inode_item);
+ left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
+ left_ii);
+ } else {
+ right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
+ sctx->right_path->slots[0],
+ struct btrfs_inode_item);
+ right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
+ right_ii);
+ }
+ if (result == BTRFS_COMPARE_TREE_CHANGED) {
+ right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
+ sctx->right_path->slots[0],
+ struct btrfs_inode_item);
+
+ right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
+ right_ii);
+ if (left_gen != right_gen)
+ sctx->cur_inode_new_gen = 1;
+ }
+
+ if (result == BTRFS_COMPARE_TREE_NEW) {
+ sctx->cur_inode_gen = left_gen;
+ sctx->cur_inode_new = 1;
+ sctx->cur_inode_deleted = 0;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->left_path->nodes[0], left_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->left_path->nodes[0], left_ii);
+ if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
+ ret = send_create_inode(sctx, sctx->left_path,
+ sctx->cmp_key);
+ } else if (result == BTRFS_COMPARE_TREE_DELETED) {
+ sctx->cur_inode_gen = right_gen;
+ sctx->cur_inode_new = 0;
+ sctx->cur_inode_deleted = 1;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->right_path->nodes[0], right_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->right_path->nodes[0], right_ii);
+ } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
+ if (sctx->cur_inode_new_gen) {
+ sctx->cur_inode_gen = right_gen;
+ sctx->cur_inode_new = 0;
+ sctx->cur_inode_deleted = 1;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->right_path->nodes[0], right_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->right_path->nodes[0], right_ii);
+ ret = process_all_refs(sctx,
+ BTRFS_COMPARE_TREE_DELETED);
+ if (ret < 0)
+ goto out;
+
+ sctx->cur_inode_gen = left_gen;
+ sctx->cur_inode_new = 1;
+ sctx->cur_inode_deleted = 0;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->left_path->nodes[0], left_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->left_path->nodes[0], left_ii);
+ ret = send_create_inode(sctx, sctx->left_path,
+ sctx->cmp_key);
+ if (ret < 0)
+ goto out;
+
+ ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
+ if (ret < 0)
+ goto out;
+ ret = process_all_extents(sctx);
+ if (ret < 0)
+ goto out;
+ ret = process_all_new_xattrs(sctx);
+ if (ret < 0)
+ goto out;
+ } else {
+ sctx->cur_inode_gen = left_gen;
+ sctx->cur_inode_new = 0;
+ sctx->cur_inode_new_gen = 0;
+ sctx->cur_inode_deleted = 0;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->left_path->nodes[0], left_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->left_path->nodes[0], left_ii);
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int changed_ref(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result)
+{
+ int ret = 0;
+
+ BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+
+ if (!sctx->cur_inode_new_gen &&
+ sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ if (result == BTRFS_COMPARE_TREE_NEW)
+ ret = record_new_ref(sctx);
+ else if (result == BTRFS_COMPARE_TREE_DELETED)
+ ret = record_deleted_ref(sctx);
+ else if (result == BTRFS_COMPARE_TREE_CHANGED)
+ ret = record_changed_ref(sctx);
+ }
+
+ return ret;
+}
+
+static int changed_xattr(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result)
+{
+ int ret = 0;
+
+ BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+
+ if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
+ if (result == BTRFS_COMPARE_TREE_NEW)
+ ret = process_new_xattr(sctx);
+ else if (result == BTRFS_COMPARE_TREE_DELETED)
+ ret = process_deleted_xattr(sctx);
+ else if (result == BTRFS_COMPARE_TREE_CHANGED)
+ ret = process_changed_xattr(sctx);
+ }
+
+ return ret;
+}
+
+static int changed_extent(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result)
+{
+ int ret = 0;
+
+ BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+
+ if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
+ if (result != BTRFS_COMPARE_TREE_DELETED)
+ ret = process_extent(sctx, sctx->left_path,
+ sctx->cmp_key);
+ }
+
+ return ret;
+}
+
+
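+/*
+ * Callback for btrfs_compare_trees (and for full_send_tree). Finishes the
+ * previous inode if needed and dispatches the changed item to the
+ * inode/ref/xattr/extent handlers.
+ */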
+static int changed_cb(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ struct btrfs_key *key,
+ enum btrfs_compare_tree_result result,
+ void *ctx)
+{
+ int ret = 0;
+ struct send_ctx *sctx = ctx;
+
+ sctx->left_path = left_path;
+ sctx->right_path = right_path;
+ sctx->cmp_key = key;
+
+ ret = finish_inode_if_needed(sctx, 0);
+ if (ret < 0)
+ goto out;
+
+ if (key->type == BTRFS_INODE_ITEM_KEY)
+ ret = changed_inode(sctx, result);
+ else if (key->type == BTRFS_INODE_REF_KEY)
+ ret = changed_ref(sctx, result);
+ else if (key->type == BTRFS_XATTR_ITEM_KEY)
+ ret = changed_xattr(sctx, result);
+ else if (key->type == BTRFS_EXTENT_DATA_KEY)
+ ret = changed_extent(sctx, result);
+
+out:
+ return ret;
+}
+
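+/*
+ * Full send, used when no parent snapshot is given. Walks the whole send
+ * root and feeds every item as BTRFS_COMPARE_TREE_NEW to changed_cb.
+ */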
+static int full_send_tree(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_root *send_root = sctx->send_root;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_path *path;
+ struct extent_buffer *eb;
+ int slot;
+ u64 start_ctransid;
+ u64 ctransid;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ spin_lock(&send_root->root_times_lock);
+ start_ctransid = btrfs_root_ctransid(&send_root->root_item);
+ spin_unlock(&send_root->root_times_lock);
+
+ key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+join_trans:
+ /*
+ * We need to make sure the transaction does not get committed
+ * while we do anything on commit roots. Join a transaction to prevent
+ * this.
+ */
+ trans = btrfs_join_transaction(send_root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
+ /*
+ * Make sure the tree has not changed
+ */
+ spin_lock(&send_root->root_times_lock);
+ ctransid = btrfs_root_ctransid(&send_root->root_item);
+ spin_unlock(&send_root->root_times_lock);
+
+ if (ctransid != start_ctransid) {
+ WARN(1, KERN_WARNING "btrfs: the root that you're trying to "
+ "send was modified in between. This is "
+ "probably a bug.\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ goto out_finish;
+
+ while (1) {
+ /*
+		 * When someone wants to commit while we iterate, end the
+ * joined transaction and rejoin.
+ */
+ if (btrfs_should_end_transaction(trans, send_root)) {
+ ret = btrfs_end_transaction(trans, send_root);
+ trans = NULL;
+ if (ret < 0)
+ goto out;
+ btrfs_release_path(path);
+ goto join_trans;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ ret = changed_cb(send_root, NULL, path, NULL,
+ &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
+ if (ret < 0)
+ goto out;
+
+ key.objectid = found_key.objectid;
+ key.type = found_key.type;
+ key.offset = found_key.offset + 1;
+
+ ret = btrfs_next_item(send_root, path);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ break;
+ }
+ }
+
+out_finish:
+ ret = finish_inode_if_needed(sctx, 1);
+
+out:
+ btrfs_free_path(path);
+ if (trans) {
+ if (!ret)
+ ret = btrfs_end_transaction(trans, send_root);
+ else
+ btrfs_end_transaction(trans, send_root);
+ }
+ return ret;
+}
+
+static int send_subvol(struct send_ctx *sctx)
+{
+ int ret;
+
+ ret = send_header(sctx);
+ if (ret < 0)
+ goto out;
+
+ ret = send_subvol_begin(sctx);
+ if (ret < 0)
+ goto out;
+
+ if (sctx->parent_root) {
+ ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
+ changed_cb, sctx);
+ if (ret < 0)
+ goto out;
+ ret = finish_inode_if_needed(sctx, 1);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = full_send_tree(sctx);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ if (!ret)
+ ret = close_cur_inode_file(sctx);
+ else
+ close_cur_inode_file(sctx);
+
+ free_recorded_refs(sctx);
+ return ret;
+}
+
+long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
+{
+ int ret = 0;
+ struct btrfs_root *send_root;
+ struct btrfs_root *clone_root;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_ioctl_send_args *arg = NULL;
+ struct btrfs_key key;
+ struct file *filp = NULL;
+ struct send_ctx *sctx = NULL;
+ u32 i;
+ u64 *clone_sources_tmp = NULL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ send_root = BTRFS_I(fdentry(mnt_file)->d_inode)->root;
+ fs_info = send_root->fs_info;
+
+ arg = memdup_user(arg_, sizeof(*arg));
+ if (IS_ERR(arg)) {
+ ret = PTR_ERR(arg);
+ arg = NULL;
+ goto out;
+ }
+
+ if (!access_ok(VERIFY_READ, arg->clone_sources,
+			sizeof(*arg->clone_sources) *
+			arg->clone_sources_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
+ if (!sctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&sctx->new_refs);
+ INIT_LIST_HEAD(&sctx->deleted_refs);
+ INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
+ INIT_LIST_HEAD(&sctx->name_cache_list);
+
+ sctx->send_filp = fget(arg->send_fd);
+ if (IS_ERR(sctx->send_filp)) {
+ ret = PTR_ERR(sctx->send_filp);
+ goto out;
+ }
+
+ sctx->mnt = mnt_file->f_path.mnt;
+
+ sctx->send_root = send_root;
+ sctx->clone_roots_cnt = arg->clone_sources_count;
+
+ sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
+ sctx->send_buf = vmalloc(sctx->send_max_size);
+ if (!sctx->send_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
+ if (!sctx->read_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
+ (arg->clone_sources_count + 1));
+ if (!sctx->clone_roots) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (arg->clone_sources_count) {
+ clone_sources_tmp = vmalloc(arg->clone_sources_count *
+ sizeof(*arg->clone_sources));
+ if (!clone_sources_tmp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
+ arg->clone_sources_count *
+ sizeof(*arg->clone_sources));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ for (i = 0; i < arg->clone_sources_count; i++) {
+ key.objectid = clone_sources_tmp[i];
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+ clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (!clone_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (IS_ERR(clone_root)) {
+ ret = PTR_ERR(clone_root);
+ goto out;
+ }
+ sctx->clone_roots[i].root = clone_root;
+ }
+ vfree(clone_sources_tmp);
+ clone_sources_tmp = NULL;
+ }
+
+ if (arg->parent_root) {
+ key.objectid = arg->parent_root;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+ sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (!sctx->parent_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /*
+ * Clones from send_root are allowed, but only if the clone source
+ * is behind the current send position. This is checked while searching
+ * for possible clone sources.
+ */
+ sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
+
+ /* We do a bsearch later */
+ sort(sctx->clone_roots, sctx->clone_roots_cnt,
+ sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
+ NULL);
+
+ ret = send_subvol(sctx);
+ if (ret < 0)
+ goto out;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_END);
+ if (ret < 0)
+ goto out;
+ ret = send_cmd(sctx);
+ if (ret < 0)
+ goto out;
+
+out:
+ if (filp)
+ fput(filp);
+ kfree(arg);
+ vfree(clone_sources_tmp);
+
+ if (sctx) {
+ if (sctx->send_filp)
+ fput(sctx->send_filp);
+
+ vfree(sctx->clone_roots);
+ vfree(sctx->send_buf);
+ vfree(sctx->read_buf);
+
+ name_cache_free(sctx);
+
+ kfree(sctx);
+ }
+
+ return ret;
+}
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
new file mode 100644
index 00000000000..9934e948e57
--- /dev/null
+++ b/fs/btrfs/send.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2012 Alexander Block. All rights reserved.
+ * Copyright (C) 2012 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+
+#define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
+#define BTRFS_SEND_STREAM_VERSION 1
+
+#define BTRFS_SEND_BUF_SIZE (1024 * 64)
+#define BTRFS_SEND_READ_SIZE (1024 * 48)
+
+enum btrfs_tlv_type {
+ BTRFS_TLV_U8,
+ BTRFS_TLV_U16,
+ BTRFS_TLV_U32,
+ BTRFS_TLV_U64,
+ BTRFS_TLV_BINARY,
+ BTRFS_TLV_STRING,
+ BTRFS_TLV_UUID,
+ BTRFS_TLV_TIMESPEC,
+};
+
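+/*
+ * A send stream starts with this header, followed by a sequence of commands.
+ * Each command consists of a btrfs_cmd_header followed by TLV encoded
+ * attributes.
+ */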
+struct btrfs_stream_header {
+ char magic[sizeof(BTRFS_SEND_STREAM_MAGIC)];
+ __le32 version;
+} __attribute__ ((__packed__));
+
+struct btrfs_cmd_header {
+ /* len excluding the header */
+ __le32 len;
+ __le16 cmd;
+ /* crc including the header with zero crc field */
+ __le32 crc;
+} __attribute__ ((__packed__));
+
+struct btrfs_tlv_header {
+ __le16 tlv_type;
+ /* len excluding the header */
+ __le16 tlv_len;
+} __attribute__ ((__packed__));
+
+/* commands */
+enum btrfs_send_cmd {
+ BTRFS_SEND_C_UNSPEC,
+
+ BTRFS_SEND_C_SUBVOL,
+ BTRFS_SEND_C_SNAPSHOT,
+
+ BTRFS_SEND_C_MKFILE,
+ BTRFS_SEND_C_MKDIR,
+ BTRFS_SEND_C_MKNOD,
+ BTRFS_SEND_C_MKFIFO,
+ BTRFS_SEND_C_MKSOCK,
+ BTRFS_SEND_C_SYMLINK,
+
+ BTRFS_SEND_C_RENAME,
+ BTRFS_SEND_C_LINK,
+ BTRFS_SEND_C_UNLINK,
+ BTRFS_SEND_C_RMDIR,
+
+ BTRFS_SEND_C_SET_XATTR,
+ BTRFS_SEND_C_REMOVE_XATTR,
+
+ BTRFS_SEND_C_WRITE,
+ BTRFS_SEND_C_CLONE,
+
+ BTRFS_SEND_C_TRUNCATE,
+ BTRFS_SEND_C_CHMOD,
+ BTRFS_SEND_C_CHOWN,
+ BTRFS_SEND_C_UTIMES,
+
+ BTRFS_SEND_C_END,
+ __BTRFS_SEND_C_MAX,
+};
+#define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)
+
+/* attributes in send stream */
+enum {
+ BTRFS_SEND_A_UNSPEC,
+
+ BTRFS_SEND_A_UUID,
+ BTRFS_SEND_A_CTRANSID,
+
+ BTRFS_SEND_A_INO,
+ BTRFS_SEND_A_SIZE,
+ BTRFS_SEND_A_MODE,
+ BTRFS_SEND_A_UID,
+ BTRFS_SEND_A_GID,
+ BTRFS_SEND_A_RDEV,
+ BTRFS_SEND_A_CTIME,
+ BTRFS_SEND_A_MTIME,
+ BTRFS_SEND_A_ATIME,
+ BTRFS_SEND_A_OTIME,
+
+ BTRFS_SEND_A_XATTR_NAME,
+ BTRFS_SEND_A_XATTR_DATA,
+
+ BTRFS_SEND_A_PATH,
+ BTRFS_SEND_A_PATH_TO,
+ BTRFS_SEND_A_PATH_LINK,
+
+ BTRFS_SEND_A_FILE_OFFSET,
+ BTRFS_SEND_A_DATA,
+
+ BTRFS_SEND_A_CLONE_UUID,
+ BTRFS_SEND_A_CLONE_CTRANSID,
+ BTRFS_SEND_A_CLONE_PATH,
+ BTRFS_SEND_A_CLONE_OFFSET,
+ BTRFS_SEND_A_CLONE_LEN,
+
+ __BTRFS_SEND_A_MAX,
+};
+#define BTRFS_SEND_A_MAX (__BTRFS_SEND_A_MAX - 1)
+
+#ifdef __KERNEL__
+long btrfs_ioctl_send(struct file *mnt_file, void __user *arg);
+#endif
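
send.h above defines the stream format: a btrfs_stream_header, then a sequence of commands, each a btrfs_cmd_header followed by TLV-encoded attributes; both len fields exclude their headers and the command crc is computed with the crc field zeroed. A minimal sketch of how a receiver might walk the attributes of one command, using only the structures defined above (le16toh from <endian.h> stands in for the kernel's le16_to_cpu):

#include <endian.h>
#include <stdint.h>
#include <string.h>
/* assumes struct btrfs_tlv_header and the BTRFS_SEND_A_* values from send.h */

/*
 * Walk the TLV attributes of one command; buf/len cover everything after
 * the btrfs_cmd_header.  Returns 0 on success, -1 on a malformed stream.
 */
static int walk_tlvs(const char *buf, uint32_t len)
{
	uint32_t pos = 0;

	while (pos + sizeof(struct btrfs_tlv_header) <= len) {
		struct btrfs_tlv_header hdr;
		uint16_t type, tlen;

		memcpy(&hdr, buf + pos, sizeof(hdr));
		type = le16toh(hdr.tlv_type);
		tlen = le16toh(hdr.tlv_len);	/* excludes the header */

		if (type == BTRFS_SEND_A_UNSPEC || type > BTRFS_SEND_A_MAX)
			return -1;
		if (pos + sizeof(hdr) + tlen > len)
			return -1;

		/* attribute payload: tlen bytes at buf + pos + sizeof(hdr) */
		pos += sizeof(hdr) + tlen;
	}
	return pos == len ? 0 : -1;
}
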
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index c6ffa581241..b976597b072 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -17,15 +17,27 @@
*/
#include <linux/highmem.h>
+#include <asm/unaligned.h>
-/* this is some deeply nasty code. ctree.h has a different
- * definition for this BTRFS_SETGET_FUNCS macro, behind a #ifndef
+#include "ctree.h"
+
+static inline u8 get_unaligned_le8(const void *p)
+{
+ return *(u8 *)p;
+}
+
+static inline void put_unaligned_le8(u8 val, void *p)
+{
+ *(u8 *)p = val;
+}
+
+/*
+ * this is some deeply nasty code.
*
* The end result is that anyone who #includes ctree.h gets a
- * declaration for the btrfs_set_foo functions and btrfs_foo functions
- *
- * This file declares the macros and then #includes ctree.h, which results
- * in cpp creating the function here based on the template below.
+ * declaration for the btrfs_set_foo functions and btrfs_foo functions,
+ * which are wrappers of btrfs_set_token_#bits functions and
+ * btrfs_get_token_#bits functions, which are defined in this file.
*
* These setget functions do all the extent_buffer related mapping
* required to efficiently read and write specific fields in the extent
@@ -33,103 +45,93 @@
* an unsigned long offset into the extent buffer which has been
* cast to a specific type. This gives us all the gcc type checking.
*
- * The extent buffer api is used to do all the kmapping and page
- * spanning work required to get extent buffers in highmem and have
- * a metadata blocksize different from the page size.
- *
- * The macro starts with a simple function prototype declaration so that
- * sparse won't complain about it being static.
+ * The extent buffer api is used to do the page spanning work required to
+ * have a metadata blocksize different from the page size.
*/
-#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
-u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
-void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); \
-void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token); \
-u##bits btrfs_token_##name(struct extent_buffer *eb, \
- type *s, struct btrfs_map_token *token) \
+#define DEFINE_BTRFS_SETGET_BITS(bits) \
+u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, \
+ struct btrfs_map_token *token) \
{ \
- unsigned long part_offset = (unsigned long)s; \
- unsigned long offset = part_offset + offsetof(type, member); \
- type *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- unsigned long mem_len = sizeof(((type *)0)->member); \
- u##bits res; \
- if (token && token->kaddr && token->offset <= offset && \
- token->eb == eb && \
- (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
- kaddr = token->kaddr; \
- p = (type *)(kaddr + part_offset - token->offset); \
- res = le##bits##_to_cpu(p->member); \
- return res; \
- } \
- err = map_private_extent_buffer(eb, offset, \
- mem_len, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits leres; \
- read_eb_member(eb, s, type, member, &leres); \
- return le##bits##_to_cpu(leres); \
- } \
- p = (type *)(kaddr + part_offset - map_start); \
- res = le##bits##_to_cpu(p->member); \
- if (token) { \
- token->kaddr = kaddr; \
- token->offset = map_start; \
- token->eb = eb; \
- } \
- return res; \
+ unsigned long part_offset = (unsigned long)ptr; \
+ unsigned long offset = part_offset + off; \
+ void *p; \
+ int err; \
+ char *kaddr; \
+ unsigned long map_start; \
+ unsigned long map_len; \
+ int size = sizeof(u##bits); \
+ u##bits res; \
+ \
+ if (token && token->kaddr && token->offset <= offset && \
+ token->eb == eb && \
+ (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+ kaddr = token->kaddr; \
+ p = kaddr + part_offset - token->offset; \
+ res = get_unaligned_le##bits(p + off); \
+ return res; \
+ } \
+ err = map_private_extent_buffer(eb, offset, size, \
+ &kaddr, &map_start, &map_len); \
+ if (err) { \
+ __le##bits leres; \
+ \
+ read_extent_buffer(eb, &leres, offset, size); \
+ return le##bits##_to_cpu(leres); \
+ } \
+ p = kaddr + part_offset - map_start; \
+ res = get_unaligned_le##bits(p + off); \
+ if (token) { \
+ token->kaddr = kaddr; \
+ token->offset = map_start; \
+ token->eb = eb; \
+ } \
+ return res; \
} \
-void btrfs_set_token_##name(struct extent_buffer *eb, \
- type *s, u##bits val, struct btrfs_map_token *token) \
+void btrfs_set_token_##bits(struct extent_buffer *eb, \
+ void *ptr, unsigned long off, u##bits val, \
+ struct btrfs_map_token *token) \
{ \
- unsigned long part_offset = (unsigned long)s; \
- unsigned long offset = part_offset + offsetof(type, member); \
- type *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- unsigned long mem_len = sizeof(((type *)0)->member); \
- if (token && token->kaddr && token->offset <= offset && \
- token->eb == eb && \
- (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
- kaddr = token->kaddr; \
- p = (type *)(kaddr + part_offset - token->offset); \
- p->member = cpu_to_le##bits(val); \
- return; \
- } \
- err = map_private_extent_buffer(eb, offset, \
- mem_len, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits val2; \
- val2 = cpu_to_le##bits(val); \
- write_eb_member(eb, s, type, member, &val2); \
- return; \
- } \
- p = (type *)(kaddr + part_offset - map_start); \
- p->member = cpu_to_le##bits(val); \
- if (token) { \
- token->kaddr = kaddr; \
- token->offset = map_start; \
- token->eb = eb; \
- } \
-} \
-void btrfs_set_##name(struct extent_buffer *eb, \
- type *s, u##bits val) \
-{ \
- btrfs_set_token_##name(eb, s, val, NULL); \
-} \
-u##bits btrfs_##name(struct extent_buffer *eb, \
- type *s) \
-{ \
- return btrfs_token_##name(eb, s, NULL); \
-} \
+ unsigned long part_offset = (unsigned long)ptr; \
+ unsigned long offset = part_offset + off; \
+ void *p; \
+ int err; \
+ char *kaddr; \
+ unsigned long map_start; \
+ unsigned long map_len; \
+ int size = sizeof(u##bits); \
+ \
+ if (token && token->kaddr && token->offset <= offset && \
+ token->eb == eb && \
+ (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+ kaddr = token->kaddr; \
+ p = kaddr + part_offset - token->offset; \
+ put_unaligned_le##bits(val, p + off); \
+ return; \
+ } \
+ err = map_private_extent_buffer(eb, offset, size, \
+ &kaddr, &map_start, &map_len); \
+ if (err) { \
+ __le##bits val2; \
+ \
+ val2 = cpu_to_le##bits(val); \
+ write_extent_buffer(eb, &val2, offset, size); \
+ return; \
+ } \
+ p = kaddr + part_offset - map_start; \
+ put_unaligned_le##bits(val, p + off); \
+ if (token) { \
+ token->kaddr = kaddr; \
+ token->offset = map_start; \
+ token->eb = eb; \
+ } \
+}
-#include "ctree.h"
+DEFINE_BTRFS_SETGET_BITS(8)
+DEFINE_BTRFS_SETGET_BITS(16)
+DEFINE_BTRFS_SETGET_BITS(32)
+DEFINE_BTRFS_SETGET_BITS(64)
void btrfs_node_key(struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr)
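
The rewrite above collapses the old per-field macro bodies into four generic helpers, btrfs_get_token_{8,16,32,64} and btrfs_set_token_{8,16,32,64}, which take a raw pointer plus byte offset and go through get/put_unaligned_le* instead of dereferencing a struct member. The per-field accessors generated from ctree.h then reduce to thin wrappers; a simplified sketch of what such a wrapper boils down to (not the exact BTRFS_SETGET_FUNCS macro) looks like:

/*
 * Simplified sketch only -- the real BTRFS_SETGET_FUNCS macro in ctree.h
 * differs in detail (token variants, sparse prototypes, and so on).
 */
#define BTRFS_SETGET_FUNCS_SKETCH(name, type, member, bits)		\
static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s)	\
{									\
	return btrfs_get_token_##bits(eb, s, offsetof(type, member),	\
				      NULL);				\
}									\
static inline void btrfs_set_##name(struct extent_buffer *eb,		\
				    type *s, u##bits val)		\
{									\
	btrfs_set_token_##bits(eb, s, offsetof(type, member), val,	\
			       NULL);					\
}

/* e.g. BTRFS_SETGET_FUNCS_SKETCH(disk_key_objectid,
 *                                struct btrfs_disk_key, objectid, 64) */
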
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index b19d7556772..fa61ef59cd6 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -396,15 +396,23 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
strcmp(args[0].from, "zlib") == 0) {
compress_type = "zlib";
info->compress_type = BTRFS_COMPRESS_ZLIB;
+ btrfs_set_opt(info->mount_opt, COMPRESS);
} else if (strcmp(args[0].from, "lzo") == 0) {
compress_type = "lzo";
info->compress_type = BTRFS_COMPRESS_LZO;
+ btrfs_set_opt(info->mount_opt, COMPRESS);
+ btrfs_set_fs_incompat(info, COMPRESS_LZO);
+ } else if (strncmp(args[0].from, "no", 2) == 0) {
+ compress_type = "no";
+ info->compress_type = BTRFS_COMPRESS_NONE;
+ btrfs_clear_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
+ compress_force = false;
} else {
ret = -EINVAL;
goto out;
}
- btrfs_set_opt(info->mount_opt, COMPRESS);
if (compress_force) {
btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
pr_info("btrfs: force %s compression\n",
@@ -1455,6 +1463,13 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
ret = btrfs_scan_one_device(vol->name, FMODE_READ,
&btrfs_fs_type, &fs_devices);
break;
+ case BTRFS_IOC_DEVICES_READY:
+ ret = btrfs_scan_one_device(vol->name, FMODE_READ,
+ &btrfs_fs_type, &fs_devices);
+ if (ret)
+ break;
+ ret = !(fs_devices->num_devices == fs_devices->total_devices);
+ break;
}
kfree(vol);
@@ -1477,16 +1492,6 @@ static int btrfs_unfreeze(struct super_block *sb)
return 0;
}
-static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
-{
- int ret;
-
- ret = btrfs_dirty_inode(inode);
- if (ret)
- printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
- "error %d\n", btrfs_ino(inode), ret);
-}
-
static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
{
struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
@@ -1526,7 +1531,6 @@ static const struct super_operations btrfs_super_ops = {
.show_options = btrfs_show_options,
.show_devname = btrfs_show_devname,
.write_inode = btrfs_write_inode,
- .dirty_inode = btrfs_fs_dirty_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
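
The new BTRFS_IOC_DEVICES_READY case above rescans the named device and then reports whether all devices of that filesystem have been seen, returning 0 once num_devices equals total_devices and 1 otherwise. A hedged sketch of how a mount helper or udev callout might use it through /dev/btrfs-control; the ioctl name and struct btrfs_ioctl_vol_args are assumed from the btrfs ioctl header:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <btrfs/ioctl.h>	/* assumed to provide BTRFS_IOC_DEVICES_READY */

/*
 * Illustrative only: returns 1 when every device of dev's filesystem has
 * been scanned, 0 when some are still missing, -1 on error.
 */
static int all_devices_ready(const char *dev)
{
	struct btrfs_ioctl_vol_args args;
	int fd, ret;

	fd = open("/dev/btrfs-control", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	strncpy(args.name, dev, sizeof(args.name) - 1);

	ret = ioctl(fd, BTRFS_IOC_DEVICES_READY, &args);
	close(fd);

	if (ret < 0)
		return -1;
	return ret == 0;	/* kernel returns !(num_devices == total_devices) */
}
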
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index b72b068183e..7ac7cdcc294 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -22,6 +22,7 @@
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
+#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -38,7 +39,6 @@ void put_transaction(struct btrfs_transaction *transaction)
if (atomic_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(transaction->delayed_refs.root.rb_node);
- WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
@@ -100,8 +100,8 @@ loop:
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
cur_trans = fs_info->running_transaction;
goto loop;
- } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- spin_unlock(&root->fs_info->trans_lock);
+ } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ spin_unlock(&fs_info->trans_lock);
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
return -EROFS;
}
@@ -126,7 +126,6 @@ loop:
cur_trans->delayed_refs.num_heads = 0;
cur_trans->delayed_refs.flushing = 0;
cur_trans->delayed_refs.run_delayed_start = 0;
- cur_trans->delayed_refs.seq = 1;
/*
* although the tree mod log is per file system and not per transaction,
@@ -145,10 +144,8 @@ loop:
}
atomic_set(&fs_info->tree_mod_seq, 0);
- init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
spin_lock_init(&cur_trans->commit_lock);
spin_lock_init(&cur_trans->delayed_refs.lock);
- INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
@@ -299,6 +296,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
struct btrfs_transaction *cur_trans;
u64 num_bytes = 0;
int ret;
+ u64 qgroup_reserved = 0;
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
return ERR_PTR(-EROFS);
@@ -317,6 +315,14 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
* the appropriate flushing if need be.
*/
if (num_items > 0 && root != root->fs_info->chunk_root) {
+ if (root->fs_info->quota_enabled &&
+ is_fstree(root->root_key.objectid)) {
+ qgroup_reserved = num_items * root->leafsize;
+ ret = btrfs_qgroup_reserve(root, qgroup_reserved);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
ret = btrfs_block_rsv_add(root,
&root->fs_info->trans_block_rsv,
@@ -349,11 +355,16 @@ again:
h->transaction = cur_trans;
h->blocks_used = 0;
h->bytes_reserved = 0;
+ h->root = root;
h->delayed_ref_updates = 0;
h->use_count = 1;
+ h->adding_csums = 0;
h->block_rsv = NULL;
h->orig_rsv = NULL;
h->aborted = 0;
+ h->qgroup_reserved = qgroup_reserved;
+ h->delayed_ref_elem.seq = 0;
+ INIT_LIST_HEAD(&h->qgroup_ref_list);
smp_mb();
if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -473,7 +484,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_transaction *cur_trans = trans->transaction;
- struct btrfs_block_rsv *rsv = trans->block_rsv;
int updates;
int err;
@@ -481,12 +491,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
return 1;
- /*
- * We need to do this in case we're deleting csums so the global block
- * rsv get's used instead of the csum block rsv.
- */
- trans->block_rsv = NULL;
-
updates = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (updates) {
@@ -495,8 +499,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
return err;
}
- trans->block_rsv = rsv;
-
return should_end_transaction(trans, root);
}
@@ -513,8 +515,24 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
return 0;
}
+ /*
+ * do the qgroup accounting as early as possible
+ */
+ err = btrfs_delayed_refs_qgroup_accounting(trans, info);
+
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
+ /*
+ * the same root has to be passed to start_transaction and
+ * end_transaction. Subvolume quota depends on this.
+ */
+ WARN_ON(trans->root != root);
+
+ if (trans->qgroup_reserved) {
+ btrfs_qgroup_free(root, trans->qgroup_reserved);
+ trans->qgroup_reserved = 0;
+ }
+
while (count < 2) {
unsigned long cur = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
@@ -527,6 +545,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
}
count++;
}
+ btrfs_trans_release_metadata(trans, root);
+ trans->block_rsv = NULL;
if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
should_end_transaction(trans, root)) {
@@ -567,6 +587,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
err = -EIO;
}
+ assert_qgroups_uptodate(trans);
memset(trans, 0, sizeof(*trans));
kmem_cache_free(btrfs_trans_handle_cachep, trans);
@@ -785,6 +806,13 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
ret = btrfs_run_dev_stats(trans, root->fs_info);
BUG_ON(ret);
+ ret = btrfs_run_qgroups(trans, root->fs_info);
+ BUG_ON(ret);
+
+ /* run_qgroups might have added some more refs */
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ BUG_ON(ret);
+
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
next = fs_info->dirty_cowonly_roots.next;
list_del_init(next);
@@ -926,11 +954,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct dentry *dentry;
struct extent_buffer *tmp;
struct extent_buffer *old;
+ struct timespec cur_time = CURRENT_TIME;
int ret;
u64 to_reserve = 0;
u64 index = 0;
u64 objectid;
u64 root_flags;
+ uuid_le new_uuid;
rsv = trans->block_rsv;
@@ -957,6 +987,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
}
+ ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
+ objectid, pending->inherit);
+ kfree(pending->inherit);
+ if (ret) {
+ pending->error = ret;
+ goto fail;
+ }
+
key.objectid = objectid;
key.offset = (u64)-1;
key.type = BTRFS_ROOT_ITEM_KEY;
@@ -1016,6 +1054,20 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
btrfs_set_root_flags(new_root_item, root_flags);
+ btrfs_set_root_generation_v2(new_root_item,
+ trans->transid);
+ uuid_le_gen(&new_uuid);
+ memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
+ memcpy(new_root_item->parent_uuid, root->root_item.uuid,
+ BTRFS_UUID_SIZE);
+ new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
+ new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec);
+ btrfs_set_root_otransid(new_root_item, trans->transid);
+ memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
+ memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
+ btrfs_set_root_stransid(new_root_item, 0);
+ btrfs_set_root_rtransid(new_root_item, 0);
+
old = btrfs_lock_root_node(root);
ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
if (ret) {
@@ -1269,9 +1321,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_run_ordered_operations(root, 0);
- btrfs_trans_release_metadata(trans, root);
- trans->block_rsv = NULL;
-
if (cur_trans->aborted)
goto cleanup_transaction;
@@ -1282,6 +1331,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
if (ret)
goto cleanup_transaction;
+ btrfs_trans_release_metadata(trans, root);
+ trans->block_rsv = NULL;
+
cur_trans = trans->transaction;
/*
@@ -1330,7 +1382,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
spin_unlock(&root->fs_info->trans_lock);
}
- if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
+ if (!btrfs_test_opt(root, SSD) &&
+ (now < cur_trans->start_time || now - cur_trans->start_time < 1))
should_grow = 1;
do {
@@ -1352,6 +1405,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
goto cleanup_transaction;
/*
+ * running the delayed items may have added new refs. account
+ * them now so that they hinder processing of more delayed refs
+ * as little as possible.
+ */
+ btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+
+ /*
* rename don't use btrfs_join_transaction, so, once we
* set the transaction to blocked above, we aren't going
* to get any new ordered operations. We can safely run
@@ -1463,6 +1523,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
root->fs_info->chunk_root->node);
switch_commit_root(root->fs_info->chunk_root);
+ assert_qgroups_uptodate(trans);
update_super_roots(root);
if (!root->fs_info->log_root_recovering) {
@@ -1532,6 +1593,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return ret;
cleanup_transaction:
+ btrfs_trans_release_metadata(trans, root);
+ trans->block_rsv = NULL;
btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
// WARN_ON(1);
if (current->journal_info == trans)
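
With quotas enabled, start_transaction() above now reserves num_items * leafsize through btrfs_qgroup_reserve() before touching the metadata block rsv, records the amount in trans->qgroup_reserved, and __btrfs_end_transaction() releases it again; as the added comment notes, the same root must be passed to both calls. A minimal in-kernel sketch of that pairing:

/*
 * Sketch of the start/end pairing the qgroup reservation depends on: the
 * root given to btrfs_start_transaction() is stashed in trans->root and
 * must be the one handed back to btrfs_end_transaction() (see the
 * WARN_ON(trans->root != root) added above).
 */
static int touch_subvol_metadata(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(root, 2);	/* reserves qgroup + rsv space */
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify up to two leaves' worth of metadata here ... */

	return btrfs_end_transaction(trans, root);	/* same root frees qgroup_reserved */
}
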
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index fe27379e368..e8b8416c688 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -20,6 +20,7 @@
#define __BTRFS_TRANSACTION__
#include "btrfs_inode.h"
#include "delayed-ref.h"
+#include "ctree.h"
struct btrfs_transaction {
u64 transid;
@@ -49,6 +50,7 @@ struct btrfs_transaction {
struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
+ u64 qgroup_reserved;
unsigned long use_count;
unsigned long blocks_reserved;
unsigned long blocks_used;
@@ -57,12 +59,22 @@ struct btrfs_trans_handle {
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
int aborted;
+ int adding_csums;
+ /*
+ * this root is only needed to validate that the root passed to
+ * start_transaction is the same as the one passed to end_transaction.
+ * Subvolume quota depends on this
+ */
+ struct btrfs_root *root;
+ struct seq_list delayed_ref_elem;
+ struct list_head qgroup_ref_list;
};
struct btrfs_pending_snapshot {
struct dentry *dentry;
struct btrfs_root *root;
struct btrfs_root *snap;
+ struct btrfs_qgroup_inherit *inherit;
/* block reservation for the operation */
struct btrfs_block_rsv block_rsv;
/* extra metadata reservation for relocation */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8abeae4224f..c86670f4f28 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -637,7 +637,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
}
inode_set_bytes(inode, saved_nbytes);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
out:
if (inode)
iput(inode);
@@ -1133,7 +1133,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
if (ret == 0) {
btrfs_inc_nlink(inode);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
} else if (ret == -EEXIST) {
ret = 0;
} else {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ecaad40e7ef..b8708f994e6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -429,6 +429,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
mutex_init(&fs_devices->device_list_mutex);
fs_devices->latest_devid = orig->latest_devid;
fs_devices->latest_trans = orig->latest_trans;
+ fs_devices->total_devices = orig->total_devices;
memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
/* We have held the volume lock, it is safe to get the devices. */
@@ -739,6 +740,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
int ret;
u64 devid;
u64 transid;
+ u64 total_devices;
flags |= FMODE_EXCL;
bdev = blkdev_get_by_path(path, flags, holder);
@@ -760,6 +762,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
transid = btrfs_super_generation(disk_super);
+ total_devices = btrfs_super_num_devices(disk_super);
if (disk_super->label[0])
printk(KERN_INFO "device label %s ", disk_super->label);
else
@@ -767,7 +770,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
-
+ if (!ret && fs_devices_ret)
+ (*fs_devices_ret)->total_devices = total_devices;
brelse(bh);
error_close:
mutex_unlock(&uuid_mutex);
@@ -1433,6 +1437,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
list_del_rcu(&device->dev_list);
device->fs_devices->num_devices--;
+ device->fs_devices->total_devices--;
if (device->missing)
root->fs_info->fs_devices->missing_devices--;
@@ -1550,6 +1555,7 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
fs_devices->seeding = 0;
fs_devices->num_devices = 0;
fs_devices->open_devices = 0;
+ fs_devices->total_devices = 0;
fs_devices->seed = seed_devices;
generate_random_uuid(fs_devices->fsid);
@@ -1749,6 +1755,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
root->fs_info->fs_devices->num_devices++;
root->fs_info->fs_devices->open_devices++;
root->fs_info->fs_devices->rw_devices++;
+ root->fs_info->fs_devices->total_devices++;
if (device->can_discard)
root->fs_info->fs_devices->num_can_discard++;
root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
@@ -4736,9 +4743,6 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
key.offset = device->devid;
ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
if (ret) {
- printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
- rcu_str_deref(device->name),
- (unsigned long long)device->devid);
__btrfs_reset_dev_stats(device);
device->dev_stats_valid = 1;
btrfs_release_path(path);
@@ -4880,6 +4884,14 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
+ int i;
+
+ for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+ if (btrfs_dev_stat_read(dev, i) != 0)
+ break;
+ if (i == BTRFS_DEV_STAT_VALUES_MAX)
+ return; /* all values == 0, suppress message */
+
printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
rcu_str_deref(dev->name),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
@@ -4890,8 +4902,7 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
}
int btrfs_get_dev_stats(struct btrfs_root *root,
- struct btrfs_ioctl_get_dev_stats *stats,
- int reset_after_read)
+ struct btrfs_ioctl_get_dev_stats *stats)
{
struct btrfs_device *dev;
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
@@ -4909,7 +4920,7 @@ int btrfs_get_dev_stats(struct btrfs_root *root,
printk(KERN_WARNING
"btrfs: get dev_stats failed, not yet valid\n");
return -ENODEV;
- } else if (reset_after_read) {
+ } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
if (stats->nr_items > i)
stats->values[i] =
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 95f6637614d..5479325987b 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -126,6 +126,7 @@ struct btrfs_fs_devices {
u64 missing_devices;
u64 total_rw_bytes;
u64 num_can_discard;
+ u64 total_devices;
struct block_device *latest_bdev;
/* all of the devices in the FS, protected by a mutex
@@ -293,8 +294,7 @@ struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_root *root,
- struct btrfs_ioctl_get_dev_stats *stats,
- int reset_after_read);
+ struct btrfs_ioctl_get_dev_stats *stats);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
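
btrfs_get_dev_stats() above loses its reset_after_read parameter; the reset request now travels in stats->flags as BTRFS_DEV_STATS_RESET. A hedged userspace sketch, assuming BTRFS_IOC_GET_DEV_STATS and struct btrfs_ioctl_get_dev_stats from the btrfs ioctl header:

#include <linux/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <btrfs/ioctl.h>	/* assumed to provide BTRFS_IOC_GET_DEV_STATS */

/* Read, and optionally clear, the error counters of devid on a mounted fs. */
static int read_dev_stats(int mnt_fd, __u64 devid, int reset)
{
	struct btrfs_ioctl_get_dev_stats st;

	memset(&st, 0, sizeof(st));
	st.devid = devid;
	st.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	if (reset)
		st.flags = BTRFS_DEV_STATS_RESET;	/* replaces reset_after_read */

	if (ioctl(mnt_fd, BTRFS_IOC_GET_DEV_STATS, &st) < 0)
		return -1;

	printf("wr %llu rd %llu flush %llu\n",
	       (unsigned long long)st.values[BTRFS_DEV_STAT_WRITE_ERRS],
	       (unsigned long long)st.values[BTRFS_DEV_STAT_READ_ERRS],
	       (unsigned long long)st.values[BTRFS_DEV_STAT_FLUSH_ERRS]);
	return 0;
}
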
diff --git a/fs/inode.c b/fs/inode.c
index 775cbabd4fa..3cc50432046 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1551,6 +1551,8 @@ void touch_atime(struct path *path)
* Btrfs), but since we touch atime while walking down the path we
* really don't care if we failed to update the atime of the file,
* so just ignore the return value.
+ * We may also fail on filesystems that have the ability to make parts
+ * of the fs read only, e.g. subvolumes in Btrfs.
*/
update_time(inode, &now, S_ATIME);
mnt_drop_write(mnt);