From 24da4fab5a617ecbf0f0c64e7ba7703383faa411 Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Tue, 24 May 2011 00:23:34 +0200
Subject: vfs: Create __block_page_mkwrite() helper passing error values back

Create a __block_page_mkwrite() helper which does everything
block_page_mkwrite() does except that it passes errors from the
__block_write_begin / block_commit_write calls back to the caller.

Reviewed-by: Christoph Hellwig
Signed-off-by: Jan Kara
Signed-off-by: Al Viro
---
 fs/buffer.c | 37 ++++++++++++++++++++-----------------
 1 file changed, 20 insertions(+), 17 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index a08bb8e61c6..f6ad8f9b8fa 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2332,23 +2332,22 @@ EXPORT_SYMBOL(block_commit_write);
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
  */
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-		   get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+			 get_block_t get_block)
 {
 	struct page *page = vmf->page;
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	unsigned long end;
 	loff_t size;
-	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+	int ret;
 
 	lock_page(page);
 	size = i_size_read(inode);
 	if ((page->mapping != inode->i_mapping) ||
 	    (page_offset(page) > size)) {
-		/* page got truncated out from underneath us */
-		unlock_page(page);
-		goto out;
+		/* We overload EFAULT to mean page got truncated */
+		ret = -EFAULT;
+		goto out_unlock;
 	}
 
 	/* page is wholly or partially inside EOF */
@@ -2361,18 +2360,22 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if (!ret)
 		ret = block_commit_write(page, 0, end);
 
-	if (unlikely(ret)) {
-		unlock_page(page);
-		if (ret == -ENOMEM)
-			ret = VM_FAULT_OOM;
-		else /* -ENOSPC, -EIO, etc */
-			ret = VM_FAULT_SIGBUS;
-	} else
-		ret = VM_FAULT_LOCKED;
-
-out:
+	if (unlikely(ret < 0))
+		goto out_unlock;
+	return 0;
+out_unlock:
+	unlock_page(page);
 	return ret;
 }
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+		       get_block_t get_block)
+{
+	int ret = __block_page_mkwrite(vma, vmf, get_block);
+
+	return block_page_mkwrite_return(ret);
+}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
--
cgit v1.2.3-70-g09d2
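
The point of the split is that a filesystem can now wrap the helper in its
own locking or journaling and still see the raw error code. A minimal sketch
of such a caller against the 2.6.39-era API (myfs_page_mkwrite() and
myfs_get_block() are hypothetical names, not from the patch):

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int err;

	/* take filesystem-private locks / start a transaction here */
	err = __block_page_mkwrite(vma, vmf, myfs_get_block);
	/* drop the locks / stop the transaction here */

	/* translate 0 / -EFAULT / -ENOMEM / ... into VM_FAULT_* codes */
	return block_page_mkwrite_return(err);
}
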
From ea13a86463fd0c26c2c209c53dc46b8eff81bad4 Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Tue, 24 May 2011 00:23:35 +0200
Subject: vfs: Block mmapped writes while the fs is frozen

We should not allow file modification via mmap while the filesystem is
frozen, so block in block_page_mkwrite() while the filesystem is frozen.
We cannot do the blocking wait in __block_page_mkwrite() since e.g. ext4
will want to call that function with a transaction already started in
some cases, and that would deadlock. But we can at least do the
non-blocking, reliable check in __block_page_mkwrite(), which is the
hardest part anyway.

We have to check for a frozen filesystem with the page marked dirty and
under the page lock, with which we then return from ->page_mkwrite().
Only that way can we avoid racing with writeback done by the freezing
code: either we mark the page dirty after the writeback has started, see
freezing in progress, and block; or writeback will wait for our page
lock, which is released only when the fault is done, and then writeback
will write out and write-protect the page again.

Reviewed-by: Christoph Hellwig
Signed-off-by: Jan Kara
Signed-off-by: Al Viro
---
 fs/buffer.c                 | 24 +++++++++++++++++++++++-
 include/linux/buffer_head.h |  2 ++
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index f6ad8f9b8fa..b0675bfe820 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2331,6 +2331,9 @@ EXPORT_SYMBOL(block_commit_write);
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
+ *
+ * Direct callers of this function should call vfs_check_frozen() so that page
+ * fault does not busyloop until the fs is thawed.
  */
 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 			 get_block_t get_block)
@@ -2362,6 +2365,18 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 	if (unlikely(ret < 0))
 		goto out_unlock;
+	/*
+	 * Freezing in progress? We check after the page is marked dirty and
+	 * with page lock held so if the test here fails, we are sure freezing
+	 * code will wait during syncing until the page fault is done - at that
+	 * point page will be dirty and unlocked so freezing code will write it
+	 * and writeprotect it again.
+	 */
+	set_page_dirty(page);
+	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
 	return 0;
 out_unlock:
 	unlock_page(page);
@@ -2372,8 +2387,15 @@ EXPORT_SYMBOL(__block_page_mkwrite);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 		       get_block_t get_block)
 {
-	int ret = __block_page_mkwrite(vma, vmf, get_block);
+	int ret;
+	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
 
+	/*
+	 * This check is racy but catches the common case. The check in
+	 * __block_page_mkwrite() is reliable.
+	 */
+	vfs_check_frozen(sb, SB_FREEZE_WRITE);
+	ret = __block_page_mkwrite(vma, vmf, get_block);
 	return block_page_mkwrite_return(ret);
 }
 EXPORT_SYMBOL(block_page_mkwrite);

diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 2bf6a9136a9..503c8a6b307 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -230,6 +230,8 @@ static inline int block_page_mkwrite_return(int err)
 		return VM_FAULT_NOPAGE;
 	if (err == -ENOMEM)
 		return VM_FAULT_OOM;
+	if (err == -EAGAIN)
+		return VM_FAULT_RETRY;
 	/* -ENOSPC, -EDQUOT, -EIO ... */
 	return VM_FAULT_SIGBUS;
 }
--
cgit v1.2.3-70-g09d2
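
The busyloop the new comment warns about comes from -EAGAIN mapping to
VM_FAULT_RETRY: without the blocking check, a fault against a frozen
filesystem would just be retried immediately. For reference, the freeze
check of this era is simply a wait for the superblock to thaw; its
definition in include/linux/fs.h was essentially:

#define vfs_check_frozen(sb, level) \
	wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))

so block_page_mkwrite() sleeps until s_frozen drops below SB_FREEZE_WRITE
before attempting the fault at all.
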
From c515e1fd361c2a08a9c2eb139396ec30a4f477dc Mon Sep 17 00:00:00 2001
From: Dan Magenheimer
Date: Thu, 26 May 2011 10:01:43 -0600
Subject: mm/fs: add hooks to support cleancache

This fourth patch of eight in this cleancache series provides the core
hooks in VFS for: initializing cleancache per filesystem; capturing
clean pages reclaimed by the page cache; attempting to get pages from
cleancache before a filesystem read; and ensuring coherency between
pagecache, disk, and cleancache. Note that the placement of these hooks
was stable from 2.6.18 to 2.6.38; a minor semantic change was required
due to a patchset in 2.6.39.

All hooks become no-ops if CONFIG_CLEANCACHE is unset, or become a check
of a boolean global if CONFIG_CLEANCACHE is set but no cleancache
"backend" has claimed cleancache_ops.

Details and a FAQ can be found in Documentation/vm/cleancache.txt

[v8: minchan.kim@gmail.com: adapt to new remove_from_page_cache function]
Signed-off-by: Chris Mason
Signed-off-by: Dan Magenheimer
Reviewed-by: Jeremy Fitzhardinge
Reviewed-by: Konrad Rzeszutek Wilk
Cc: Andrew Morton
Cc: Al Viro
Cc: Matthew Wilcox
Cc: Nick Piggin
Cc: Mel Gorman
Cc: Rik Van Riel
Cc: Jan Beulich
Cc: Andreas Dilger
Cc: Ted Ts'o
Cc: Mark Fasheh
Cc: Joel Becker
Cc: Nitin Gupta
---
 fs/buffer.c   |  5 +++++
 fs/mpage.c    |  7 +++++++
 fs/super.c    |  3 +++
 mm/filemap.c  | 11 +++++++++++
 mm/truncate.c |  6 ++++++
 5 files changed, 32 insertions(+)

diff --git a/fs/buffer.c b/fs/buffer.c
index a08bb8e61c6..de05703b184 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -41,6 +41,7 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <linux/cleancache.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
@@ -269,6 +270,10 @@ void invalidate_bdev(struct block_device *bdev)
 	invalidate_bh_lrus();
 	lru_add_drain_all();	/* make sure all lru add caches are flushed */
 	invalidate_mapping_pages(mapping, 0, -1);
+	/* 99% of the time, we don't need to flush the cleancache on the bdev.
+	 * But, for the strange corners, lets be cautious
+	 */
+	cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
diff --git a/fs/mpage.c b/fs/mpage.c
index 0afc809e46e..fdfae9fa98c 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
+#include <linux/cleancache.h>
 
 /*
  * I/O completion handler for multipage BIOs.
@@ -271,6 +272,12 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		SetPageMappedToDisk(page);
 	}
 
+	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
+	    cleancache_get_page(page) == 0) {
+		SetPageUptodate(page);
+		goto confused;
+	}
+
 	/*
 	 * This page will go to BIO.  Do we need to send this BIO off first?
 	 */
diff --git a/fs/super.c b/fs/super.c
index 8a06881b192..b383fa40774 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -31,6 +31,7 @@
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
 #include <linux/rculist_bl.h>
+#include <linux/cleancache.h>
 #include "internal.h"
 
@@ -112,6 +113,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
 		s->s_maxbytes = MAX_NON_LFS;
 		s->s_op = &default_op;
 		s->s_time_gran = 1000000000;
+		s->cleancache_poolid = -1;
 	}
 out:
 	return s;
@@ -177,6 +179,7 @@ void deactivate_locked_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
 	if (atomic_dec_and_test(&s->s_active)) {
+		cleancache_flush_fs(s);
 		fs->kill_sb(s);
 		/*
 		 * We need to call rcu_barrier so all the delayed rcu free
diff --git a/mm/filemap.c b/mm/filemap.c
index c641edf553a..ec6fa2d7e20 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -34,6 +34,7 @@
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
 #include <linux/mm_inline.h> /* for page_is_file_cache() */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 /*
@@ -118,6 +119,16 @@ void __delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 
+	/*
+	 * if we're uptodate, flush out into the cleancache, otherwise
+	 * invalidate any existing cleancache entries.  We can't leave
+	 * stale data around in the cleancache once our page is gone
+	 */
+	if (PageUptodate(page) && PageMappedToDisk(page))
+		cleancache_put_page(page);
+	else
+		cleancache_flush_page(mapping, page);
+
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
diff --git a/mm/truncate.c b/mm/truncate.c
index a9566752913..3a29a618021 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -19,6 +19,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
+#include <linux/cleancache.h>
 #include "internal.h"
 
@@ -51,6 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+	cleancache_flush_page(page->mapping, page);
 	if (page_has_private(page))
 		do_invalidatepage(page, partial);
 }
@@ -214,6 +216,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t next;
 	int i;
 
+	cleancache_flush_inode(mapping);
 	if (mapping->nrpages == 0)
 		return;
 
@@ -291,6 +294,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 	}
+	cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
@@ -440,6 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int did_range_unmap = 0;
 	int wrapped = 0;
 
+	cleancache_flush_inode(mapping);
 	pagevec_init(&pvec, 0);
 	next = start;
 	while (next <= end && !wrapped &&
@@ -498,6 +503,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		mem_cgroup_uncharge_end();
 		cond_resched();
 	}
+	cleancache_flush_inode(mapping);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
--
cgit v1.2.3-70-g09d2
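
From a filesystem's point of view, the read-side hook follows a simple
probe-then-fallback pattern. A hedged sketch of that pattern
(myfs_readpage() and myfs_get_block() are hypothetical names; after this
patch do_mpage_readpage() already performs the equivalent probe for the
one-block-per-page case, so a real filesystem would not need both):

static int myfs_readpage(struct file *file, struct page *page)
{
	/* ->readpage is entered with the page locked and !uptodate */
	if (cleancache_get_page(page) == 0) {
		/* hit: cleancache filled the page, no disk I/O needed */
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	/* miss: do the real read; mpage_readpage() unlocks the page */
	return mpage_readpage(page, myfs_get_block);
}
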
From d76ee18a8551e33ad7dbd55cac38bc7b094f3abb Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Fri, 27 May 2011 12:23:41 -0700
Subject: fs: block_page_mkwrite should wait for writeback to finish

For filesystems such as nilfs2 and xfs that use block_page_mkwrite,
modify that function to wait for pending writeback before allowing the
page to become writable. This is needed to stabilize pages during
writeback for those two filesystems.

Signed-off-by: Darrick J. Wong
Signed-off-by: Al Viro
---
 fs/buffer.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/buffer.c b/fs/buffer.c
index 698c6b2cc46..49c9aada037 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2382,6 +2382,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 		ret = -EAGAIN;
 		goto out_unlock;
 	}
+	wait_on_page_writeback(page);
 	return 0;
 out_unlock:
 	unlock_page(page);
--
cgit v1.2.3-70-g09d2
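
Taken together with the freeze check from the earlier patch, the tail of
__block_page_mkwrite() now reads (reconstructed from the hunks above):

	set_page_dirty(page);
	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
		ret = -EAGAIN;
		goto out_unlock;
	}
	wait_on_page_writeback(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;

The wait runs with the page locked and already marked dirty, so a page
under writeback cannot be redirtied mid-I/O: the fault either waits for
the in-flight writeback to finish, or writeback waits for the page lock.
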
From f9f07b6c1372b1436aa6b45333445b443ffd8c95 Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Tue, 14 Jun 2011 00:58:27 +0200
Subject: vfs: Fix data corruption after failed write in __block_write_begin()

I've got a report of file corruption from fsx-linux on ext3. The
important operations to the page were:

  mapwrite to a hole
  partial write to the page
  read - found the page zeroed from the end of the normal write

The culprit seems to be that if get_block() fails in
__block_write_begin() (e.g. transient ENOSPC in ext3), the function does
ClearPageUptodate(page). Thus when we retry the write, the logic in
__block_write_begin() thinks zeroing of the page is needed and
overwrites old data.

In fact, I don't see why we should ever need to clear the uptodate bit
here - either the page was uptodate when we entered __block_write_begin()
and it should stay so when we leave it, or it was not uptodate and no one
had the right to set it uptodate during __block_write_begin(), so it
remains !uptodate when we leave as well. So just remove the clearing of
the bit.

Signed-off-by: Jan Kara
Signed-off-by: Al Viro
---
 fs/buffer.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 49c9aada037..1a80b048ade 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1902,10 +1902,8 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		if (!buffer_uptodate(*wait_bh))
 			err = -EIO;
 	}
-	if (unlikely(err)) {
+	if (unlikely(err))
 		page_zero_new_buffers(page, from, to);
-		ClearPageUptodate(page);
-	}
 	return err;
 }
 EXPORT_SYMBOL(__block_write_begin);
--
cgit v1.2.3-70-g09d2
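
For concreteness, a hedged userspace sketch of that sequence (the file
name, offsets, and sizes are invented for illustration, and the bug only
fired when the first attempt of step 2 hit a transient get_block()
failure, so this does not reproduce deterministically):

#define _XOPEN_SOURCE 500
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	char *p;
	int fd = open("testfile", O_RDWR | O_CREAT | O_TRUNC, 0644);

	ftruncate(fd, 4096);	/* the whole page is backed by a hole */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	p[3000] = 'x';		/* 1. mapwrite to the hole dirties the page */

	memset(buf, 'y', sizeof(buf));
	pwrite(fd, buf, sizeof(buf), 0);	/* 2. partial write; if the
						 * first attempt failed in
						 * get_block(), the buggy
						 * ClearPageUptodate() made
						 * the retry re-zero the page
						 * beyond the written range */

	pread(fd, buf, 1, 3000);	/* 3. read sees '\0' instead of 'x' */

	munmap(p, 4096);
	close(fd);
	return 0;
}
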