author    Jan Kara <jack@suse.cz>    2012-06-12 16:20:37 +0200
committer Al Viro <viro@zeniv.linux.org.uk>    2012-07-31 09:45:47 +0400
commit    14da9200140f8d722ad1767dfabadebd8b34f2ad
tree      ea5d88b091999f7a64af0b9d335d7cad4c79edfb /mm/filemap.c
parent    5d37e9e6dec65cd21be68ee92de99686213e916b
fs: Protect write paths by sb_start_write - sb_end_write
There are several entry points which dirty pages in a filesystem: mmap
(handled by block_page_mkwrite()), buffered write (handled by
__generic_file_aio_write()), splice write (handled by
generic_file_splice_write()), and truncate and fallocate (these can dirty
the last partial page and are handled inside each filesystem separately).
Protect these places with sb_start_write() and sb_end_write().
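The pattern itself is simple. A minimal sketch for one such entry point, assuming a hypothetical filesystem (the myfs_* names are illustrative and not part of this patch; only sb_start_write()/sb_end_write() come from this series):

```c
#include <linux/fs.h>

/* Hypothetical helper that does the actual block allocation. */
static long myfs_do_fallocate(struct inode *inode, int mode,
			      loff_t offset, loff_t len);

static long myfs_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	long ret;

	/* Blocks while the superblock is frozen: the last partial page
	 * may be dirtied only once the freeze is lifted. */
	sb_start_write(inode->i_sb);
	ret = myfs_do_fallocate(inode, mode, offset, len);
	sb_end_write(inode->i_sb);
	return ret;
}
```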
->page_mkwrite() calls are particularly complex since they are made with
mmap_sem held, and thus we cannot use the standard sb_start_write() due to
lock ordering constraints. We solve the problem by using a special freeze
protection, sb_start_pagefault(), which ranks below mmap_sem.
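Under that constraint a ->page_mkwrite() handler takes the shape sketched below, with the hypothetical myfs_do_mkwrite() standing in for the filesystem's real fault work; the filemap_page_mkwrite() hunk in the diff is the concrete instance:

```c
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical helper doing the actual page_mkwrite work. */
static int myfs_do_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int ret;

	/*
	 * The fault path already holds mmap_sem.  sb_start_pagefault()
	 * ranks below mmap_sem, so taking it here cannot deadlock with
	 * the freezing code the way sb_start_write() would.
	 */
	sb_start_pagefault(inode->i_sb);
	ret = myfs_do_mkwrite(vma, vmf);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
```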
BugLink: https://bugs.launchpad.net/bugs/897421
Tested-by: Kamal Mostafa <kamal@canonical.com>
Tested-by: Peter M. Petrakis <peter.petrakis@canonical.com>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Tested-by: Massimo Morana <massimo.morana@canonical.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
```diff
diff --git a/mm/filemap.c b/mm/filemap.c
index 51efee65c2c..fa5ca304148 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1718,6 +1718,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	int ret = VM_FAULT_LOCKED;
 
+	sb_start_pagefault(inode->i_sb);
 	file_update_time(vma->vm_file);
 	lock_page(page);
 	if (page->mapping != inode->i_mapping) {
@@ -1725,7 +1726,14 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
+	/*
+	 * We mark the page dirty already here so that when freeze is in
+	 * progress, we are guaranteed that writeback during freezing will
+	 * see the dirty page and writeprotect it again.
+	 */
+	set_page_dirty(page);
 out:
+	sb_end_pagefault(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL(filemap_page_mkwrite);
@@ -2426,8 +2434,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	count = ocount;
 	pos = *ppos;
 
-	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 	written = 0;
@@ -2526,6 +2532,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 
 	BUG_ON(iocb->ki_pos != pos);
 
+	sb_start_write(inode->i_sb);
 	mutex_lock(&inode->i_mutex);
 	blk_start_plug(&plug);
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
@@ -2539,6 +2546,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 		ret = err;
 	}
 	blk_finish_plug(&plug);
+	sb_end_write(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL(generic_file_aio_write);
```
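For intuition, a self-contained toy model of the level-based freeze protection these helpers build on; it is schematic plain C, not the kernel implementation, which uses per-cpu writer counters and waitqueues and sleeps rather than failing:

```c
#include <stdbool.h>

/* Freeze levels, ordered: plain writes freeze first, page faults
 * later, internal fs work last (mirroring SB_FREEZE_WRITE,
 * SB_FREEZE_PAGEFAULT, SB_FREEZE_FS from this series). */
enum { TOY_FREEZE_WRITE = 1, TOY_FREEZE_PAGEFAULT = 2, TOY_FREEZE_FS = 3 };

struct toy_sb {
	int frozen;			/* 0 = unfrozen, else current freeze level */
	int writers[TOY_FREEZE_FS];	/* active writers per level */
};

/* Returns false where the real code would sleep until thawed. */
static bool toy_start_write(struct toy_sb *sb, int level)
{
	if (sb->frozen >= level)
		return false;
	sb->writers[level - 1]++;
	return true;
}

static void toy_end_write(struct toy_sb *sb, int level)
{
	sb->writers[level - 1]--;
}
```

Because sb_start_write() takes the first level and sb_start_pagefault() the second, a freeze can block new writes while page faults already running under mmap_sem still complete, which is exactly the ordering the commit message relies on.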