author     Yan Zheng <zheng.yan@oracle.com>       2008-10-30 14:19:41 -0400
committer  Chris Mason <chris.mason@oracle.com>   2008-10-30 14:19:41 -0400
commit     9036c10208e1fc496cef7692ba66a78699b360dc
tree       a5b272158acc0e01e71731f5ccbc895a8eee1151  /fs/btrfs/file.c
parent     19b9bdb054895ba07086f0264641c9f80e0eb2c4
Btrfs: update hole handling v2
This patch splits the hole insertion code out of btrfs_setattr into
btrfs_cont_expand and updates btrfs_get_extent to properly handle the
case that file extent items are not continuous.

Signed-off-by: Yan Zheng <zheng.yan@oracle.com>
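The hole range that this patch moves out of dirty_and_release_pages() (see the first hunk below) is computed with sector-mask arithmetic: the old i_size is rounded up to a sector boundary, and the hole is sized so that it also ends on one. The following standalone sketch reproduces only that arithmetic with made-up example values; it is an illustration, not kernel code, and the locking, transaction and extent-insertion steps are left out.

#include <stdio.h>
#include <stdint.h>

/* Standalone sketch of the hole-range arithmetic from the removed hunk.
 * isize, start_pos and sectorsize are example inputs, not kernel state. */
int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t isize = 5000;		/* current file size */
	uint64_t start_pos = 16384;	/* sector-aligned start of the new write */
	uint64_t mask = sectorsize - 1;

	/* round the old size up to a sector boundary ... */
	uint64_t last_pos_in_file = (isize + mask) & ~mask;
	/* ... and size the hole so it ends on a boundary as well */
	uint64_t hole_size = (start_pos - last_pos_in_file + mask) & ~mask;

	/* prints: hole starts at 8192, spans 8192 bytes */
	printf("hole starts at %llu, spans %llu bytes\n",
	       (unsigned long long)last_pos_in_file,
	       (unsigned long long)hole_size);
	return 0;
}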
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--  fs/btrfs/file.c | 41
1 file changed, 6 insertions(+), 35 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 0aa15436590..b8a7637e14a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -142,40 +142,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	}
 	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
 
-	/* FIXME...EIEIO, ENOSPC and more */
-	/* insert any holes we need to create */
-	if (isize < start_pos) {
-		u64 last_pos_in_file;
-		u64 hole_size;
-		u64 mask = root->sectorsize - 1;
-		last_pos_in_file = (isize + mask) & ~mask;
-		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
-		if (hole_size > 0) {
-			btrfs_wait_ordered_range(inode, last_pos_in_file,
-					 last_pos_in_file + hole_size);
-			mutex_lock(&BTRFS_I(inode)->extent_mutex);
-			err = btrfs_drop_extents(trans, root, inode,
-						 last_pos_in_file,
-						 last_pos_in_file + hole_size,
-						 last_pos_in_file,
-						 &hint_byte);
-			if (err)
-				goto failed;
-
-			err = btrfs_insert_file_extent(trans, root,
-						       inode->i_ino,
-						       last_pos_in_file,
-						       0, 0, hole_size, 0,
-						       hole_size, 0, 0, 0);
-			btrfs_drop_extent_cache(inode, last_pos_in_file,
-					last_pos_in_file + hole_size - 1, 0);
-			mutex_unlock(&BTRFS_I(inode)->extent_mutex);
-			btrfs_check_file(root, inode);
-		}
-		if (err)
-			goto failed;
-	}
-
 	/* check for reserved extents on each page, we don't want
 	 * to reset the delalloc bit on things that already have
 	 * extents reserved.
@@ -191,7 +157,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		i_size_write(inode, end_pos);
 		btrfs_update_inode(trans, root, inode);
 	}
-failed:
 	err = btrfs_end_transaction(trans, root);
 out_unlock:
 	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
@@ -697,6 +662,12 @@ static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
 	start_pos = pos & ~((u64)root->sectorsize - 1);
 	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
 
+	if (start_pos > inode->i_size) {
+		err = btrfs_cont_expand(inode, start_pos);
+		if (err)
+			return err;
+	}
+
 	memset(pages, 0, num_pages * sizeof(struct page *));
 again:
 	for (i = 0; i < num_pages; i++) {
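A note on the prepare_pages() hunk above: pos is the byte offset of the write, start_pos is that offset rounded down to a sector boundary, and the expansion is only done when the aligned start lies beyond the current i_size. Below is a minimal user-space sketch of that check, with example values; needs_expand stands in for the call to btrfs_cont_expand(), whose body lives in fs/btrfs/inode.c and is not part of this diff.

#include <stdio.h>
#include <stdint.h>

/* Sketch of the alignment check added in prepare_pages(); the values are
 * illustrative and needs_expand is a placeholder for btrfs_cont_expand(). */
int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t i_size = 4096;		/* current file size */
	uint64_t pos = 13000;		/* byte offset of the new write */

	uint64_t start_pos = pos & ~(sectorsize - 1);	/* 13000 -> 12288 */
	int needs_expand = start_pos > i_size;

	/* prints: start_pos=12288, expand hole first: yes */
	printf("start_pos=%llu, expand hole first: %s\n",
	       (unsigned long long)start_pos, needs_expand ? "yes" : "no");
	return 0;
}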