author    Chris Mason <chris.mason@oracle.com>  2008-12-15 11:44:56 -0500
committer Chris Mason <chris.mason@oracle.com>  2008-12-15 11:44:56 -0500
commit    42dc7babdcc99feadb04d461592ce5898a362550 (patch)
tree      9497d6dddba1a046b4a40a51a49cc41a13922394
parent    17d217fe970d34720f4f1633dca73a6aa2f3d9d1 (diff)
Btrfs: Fix compressed writes on truncated pages
The compression code was using isize to limit the amount of data it sent through zlib. But it wasn't properly limiting the looping to just the pages inside i_size. The end result was trying to compress too many pages, including those that had not been set up and properly locked down. This made the compression code oops while trying to find_get_page on a page that didn't exist.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
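To illustrate the bug, here is a minimal userspace C sketch, not the kernel code: page_exists() and walk_range() are made-up stand-ins for find_get_page() and the compression loop, and the 4 KiB page size is an assumption. If the page walk is bounded by the requested byte range (end) instead of by actual_end = min(i_size, end + 1), it visits page indices past i_size that were never set up.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                  /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical stand-in for find_get_page(): in this sketch, only
 * pages that start inside i_size "exist". */
static int page_exists(uint64_t index, uint64_t isize)
{
	return index * PAGE_SIZE < isize;
}

/* Walk the pages covering [start, end]; with clamp_to_isize set,
 * apply the patch's bound of actual_end = min(isize, end + 1). */
static void walk_range(uint64_t start, uint64_t end, uint64_t isize,
		       int clamp_to_isize)
{
	uint64_t actual_end = isize < end + 1 ? isize : end + 1;
	uint64_t limit = clamp_to_isize ? actual_end : end + 1;
	uint64_t index;

	for (index = start >> PAGE_SHIFT;
	     index <= (limit - 1) >> PAGE_SHIFT; index++) {
		if (page_exists(index, isize))
			printf("  page %llu: ok\n",
			       (unsigned long long)index);
		else
			printf("  page %llu: past i_size, the kernel "
			       "would oops here\n",
			       (unsigned long long)index);
	}
}

int main(void)
{
	uint64_t isize = 5000;	/* file truncated to 5000 bytes */

	printf("unclamped walk of [0, 16K) (the old behaviour):\n");
	walk_range(0, 16 * 1024 - 1, isize, 0);

	printf("clamped walk (the patched behaviour):\n");
	walk_range(0, 16 * 1024 - 1, isize, 1);
	return 0;
}

With isize = 5000, only pages 0 and 1 are set up; the unclamped walk also touches pages 2 and 3, which is the find_get_page() oops the commit message describes.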
-rw-r--r--	fs/btrfs/ctree.c	2
-rw-r--r--	fs/btrfs/inode.c	8
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c0c95cccbb5..f6f7a6af035 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2887,8 +2887,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
 	path->keep_locks = 0;
 	BUG_ON(ret);
-	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
 	leaf = path->nodes[0];
+	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
 split:
 	item = btrfs_item_nr(leaf, path->slots[0]);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e64a4fe19a6..5313a13a998 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -332,6 +332,7 @@ static noinline int compress_file_range(struct inode *inode,
 	u64 disk_num_bytes;
 	u64 blocksize = root->sectorsize;
 	u64 actual_end;
+	u64 isize = i_size_read(inode);
 	int ret = 0;
 	struct page **pages = NULL;
 	unsigned long nr_pages;
@@ -345,12 +346,12 @@ static noinline int compress_file_range(struct inode *inode,
 	orig_start = start;
+	actual_end = min_t(u64, isize, end + 1);
 again:
 	will_compress = 0;
 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
-	actual_end = min_t(u64, i_size_read(inode), end + 1);
 	total_compressed = actual_end - start;
 	/* we want to make sure that amount of ram required to uncompress
@@ -488,7 +489,7 @@ again:
 		add_async_extent(async_cow, start, num_bytes,
 				 total_compressed, pages, nr_pages_ret);
-		if (start + num_bytes < end) {
+		if (start + num_bytes < end && start + num_bytes < actual_end) {
 			start += num_bytes;
 			pages = NULL;
 			cond_resched();
@@ -696,6 +697,7 @@ static noinline int cow_file_range(struct inode *inode,
 	u64 cur_alloc_size;
 	u64 blocksize = root->sectorsize;
 	u64 actual_end;
+	u64 isize = i_size_read(inode);
 	struct btrfs_key ins;
 	struct extent_map *em;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -705,7 +707,7 @@ static noinline int cow_file_range(struct inode *inode,
 	BUG_ON(!trans);
 	btrfs_set_trans_block_group(trans, inode);
-	actual_end = min_t(u64, i_size_read(inode), end + 1);
+	actual_end = min_t(u64, isize, end + 1);
 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
 	num_bytes = max(blocksize, num_bytes);