author    Maxim Patlasov <MPatlasov@parallels.com>    2013-08-16 15:51:41 +0400
committer Miklos Szeredi <mszeredi@suse.cz>           2013-10-01 16:44:53 +0200
commit    2d033eaa0073d276ee6c324dd0ade0c5074a5542 (patch)
tree      1d75adee60456c49cb680210fb85ed0dbd0cd639 /fs/fuse
parent    26d614df1da9d7d255686af5d6d4508f77853c01 (diff)
fuse: fix race in fuse_writepages()
The patch fixes a race between ftruncate(2), mmap-ed write and write(2):

1) A user makes a page dirty via an mmap-ed write.
2) The user performs a shrinking truncate(2) intended to purge the page.
3) Before fuse_do_setattr calls truncate_pagecache, the page goes to writeback. fuse_writepages_fill attaches a new page to the FUSE_WRITE request, then releases the original page by end_page_writeback and unlocks it.
4) fuse_do_setattr completes and returns successfully. From this point on, i_mutex is free.
5) An ordinary write(2) extends i_size back to cover the page. Note that fuse_send_write_pages does wait for fuse writeback, but for another page->index.
6) fuse_writepages_fill attaches more pages to the request (if any), then fuse_writepages_send is eventually called. It is supposed to crop inarg->size of the request, but it doesn't, because i_size has already been extended back.

Moving end_page_writeback behind fuse_writepages_send guarantees that __fuse_release_nowrite (called from fuse_do_setattr) will crop inarg->size of the request before write(2) gets a chance to extend i_size.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
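For context, the sketch below is a minimal userspace illustration of the user-visible sequence described above (steps 1, 2 and 5): dirty a page through a shared mapping, shrink the file with truncate, then extend i_size again with an ordinary write. It is not part of the patch and does not deterministically trigger the race, since steps 3, 4 and 6 depend on writeback timing inside the kernel; the FUSE mount path used here is hypothetical.

/*
 * Minimal sketch of the user-visible sequence from the commit message
 * (steps 1, 2 and 5). It does not deterministically reproduce the race:
 * steps 3, 4 and 6 depend on writeback timing inside the kernel.
 * The path below is a hypothetical file on a FUSE mount.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/mnt/fuse/testfile";   /* hypothetical FUSE mount */
        const size_t len = 2 * 4096;

        int fd = open(path, O_RDWR | O_CREAT, 0644);
        if (fd < 0 || ftruncate(fd, len) < 0) {
                perror("setup");
                return 1;
        }

        char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* 1) make the second page dirty via the mmap-ed write */
        memset(map + 4096, 'x', 4096);

        /* 2) shrinking truncate(2) intended to purge that page */
        if (ftruncate(fd, 4096) < 0)
                perror("ftruncate");

        /* 5) ordinary write(2) extends i_size back to cover the page */
        if (pwrite(fd, "y", 1, len - 1) < 0)
                perror("pwrite");

        munmap(map, len);
        close(fd);
        return 0;
}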
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/file.c  17
1 file changed, 16 insertions, 1 deletion
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 0bd349dd968..cc3a6c4437e 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1590,6 +1590,7 @@ struct fuse_fill_wb_data {
         struct fuse_req *req;
         struct fuse_file *ff;
         struct inode *inode;
+        struct page **orig_pages;
 };
 
 static void fuse_writepages_send(struct fuse_fill_wb_data *data)
@@ -1598,12 +1599,17 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
         struct inode *inode = data->inode;
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_inode *fi = get_fuse_inode(inode);
+        int num_pages = req->num_pages;
+        int i;
 
         req->ff = fuse_file_get(data->ff);
         spin_lock(&fc->lock);
         list_add_tail(&req->list, &fi->queued_writes);
         fuse_flush_writepages(inode);
         spin_unlock(&fc->lock);
+
+        for (i = 0; i < num_pages; i++)
+                end_page_writeback(data->orig_pages[i]);
 }
 
 static int fuse_writepages_fill(struct page *page,
@@ -1684,7 +1690,7 @@ static int fuse_writepages_fill(struct page *page,
 
         inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
         inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
-        end_page_writeback(page);
+        data->orig_pages[req->num_pages] = page;
 
         /*
          * Protected by fc->lock against concurrent access by
@@ -1716,6 +1722,13 @@ static int fuse_writepages(struct address_space *mapping,
         data.req = NULL;
         data.ff = NULL;
 
+        err = -ENOMEM;
+        data.orig_pages = kzalloc(sizeof(struct page *) *
+                                  FUSE_MAX_PAGES_PER_REQ,
+                                  GFP_NOFS);
+        if (!data.orig_pages)
+                goto out;
+
         err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
         if (data.req) {
                 /* Ignore errors if we can write at least one page */
@@ -1725,6 +1738,8 @@ static int fuse_writepages(struct address_space *mapping,
         }
         if (data.ff)
                 fuse_file_put(data.ff, false);
+
+        kfree(data.orig_pages);
 out:
         return err;
 }