Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  227
1 file changed, 83 insertions(+), 144 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6fa530256bf..5930e382959 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -156,7 +156,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
+ if (!quiet_error(bh)) {
buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
@@ -275,6 +275,7 @@ void invalidate_bdev(struct block_device *bdev)
return;
invalidate_bh_lrus();
+ lru_add_drain_all(); /* make sure all lru add caches are flushed */
invalidate_mapping_pages(mapping, 0, -1);
}
EXPORT_SYMBOL(invalidate_bdev);
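The new lru_add_drain_all() call closes a real hole: pages that were just added to the mapping can still sit in the per-CPU lru-add pagevecs, where invalidate_mapping_pages() never sees them. Draining those pagevecs first means the invalidation pass covers them too. A minimal sketch of how a caller might use this, with flush_bdev_cache() as a hypothetical helper:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical helper: write dirty pages back, then drop clean pagecache.
 * invalidate_bdev() now drains the per-CPU lru-add pagevecs itself, so
 * pages added to the mapping moments ago are invalidated as well. */
static int flush_bdev_cache(struct block_device *bdev)
{
	int err = sync_blockdev(bdev);	/* flush dirty pages first */

	if (err)
		return err;
	invalidate_bdev(bdev);		/* drop clean, unmapped pages */
	return 0;
}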
@@ -560,26 +561,17 @@ repeat:
return err;
}
-static void do_thaw_all(struct work_struct *work)
+static void do_thaw_one(struct super_block *sb, void *unused)
{
- struct super_block *sb;
char b[BDEVNAME_SIZE];
+ while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+ printk(KERN_WARNING "Emergency Thaw on %s\n",
+ bdevname(sb->s_bdev, b));
+}
- spin_lock(&sb_lock);
-restart:
- list_for_each_entry(sb, &super_blocks, s_list) {
- sb->s_count++;
- spin_unlock(&sb_lock);
- down_read(&sb->s_umount);
- while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
- printk(KERN_WARNING "Emergency Thaw on %s\n",
- bdevname(sb->s_bdev, b));
- up_read(&sb->s_umount);
- spin_lock(&sb_lock);
- if (__put_super_and_need_restart(sb))
- goto restart;
- }
- spin_unlock(&sb_lock);
+static void do_thaw_all(struct work_struct *work)
+{
+ iterate_supers(do_thaw_one, NULL);
kfree(work);
printk(KERN_WARNING "Emergency Thaw complete\n");
}
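The rework delegates superblock traversal to iterate_supers(), which takes an active reference and holds s_umount for reading across each callback, replacing the hand-rolled s_count/restart loop above. A minimal sketch of the callback pattern, with illustrative names (count_bdev_supers() and its callback are not part of the patch):

#include <linux/fs.h>

/* Illustrative callback: invoked once per superblock by iterate_supers(),
 * with a reference held and sb->s_umount taken for reading, so the
 * superblock cannot disappear underneath us. */
static void count_one(struct super_block *sb, void *arg)
{
	int *count = arg;

	if (sb->s_bdev)			/* skip virtual filesystems */
		(*count)++;
}

static int count_bdev_supers(void)
{
	int count = 0;

	iterate_supers(count_one, &count);
	return count;
}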
@@ -778,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
spin_unlock(lock);
/*
* Ensure any pending I/O completes so that
- * ll_rw_block() actually writes the current
- * contents - it is a noop if I/O is still in
- * flight on potentially older contents.
+ * write_dirty_buffer() actually writes the
+ * current contents - it is a noop if I/O is
+ * still in flight on potentially older
+ * contents.
*/
- ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+ write_dirty_buffer(bh, WRITE_SYNC_PLUG);
/*
* Kick off IO for the previous mapping. Note
@@ -912,7 +905,6 @@ try_again:
bh->b_state = 0;
atomic_set(&bh->b_count, 0);
- bh->b_private = NULL;
bh->b_size = size;
/* Link the buffer to its page */
@@ -1713,7 +1705,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
* and kswapd activity, but those code paths have their own
* higher-level throttling.
*/
- if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+ if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
redirty_page_for_writepage(wbc, page);
@@ -1841,9 +1833,12 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
}
EXPORT_SYMBOL(page_zero_new_buffers);
-static int __block_prepare_write(struct inode *inode, struct page *page,
- unsigned from, unsigned to, get_block_t *get_block)
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block)
{
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned to = from + len;
+ struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
@@ -1916,10 +1911,13 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
- if (unlikely(err))
+ if (unlikely(err)) {
page_zero_new_buffers(page, from, to);
+ ClearPageUptodate(page);
+ }
return err;
}
+EXPORT_SYMBOL(__block_write_begin);
static int __block_commit_write(struct inode *inode, struct page *page,
unsigned from, unsigned to)
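Unlike block_write_begin(), the newly exported __block_write_begin() operates on a page the caller has already locked, deriving the (from, to) byte range from pos and len. On failure it zeroes the buffers it instantiated and clears PG_uptodate, but leaves the page locked for the caller. A minimal sketch of a direct call, assuming the caller owns the page lock (prepare_locked_page() is an illustrative name):

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/* Illustrative: prepare a byte range of an already-locked pagecache page
 * for writing; unlocking and releasing the page stays our job. */
static int prepare_locked_page(struct page *page, loff_t pos, unsigned len,
			       get_block_t *get_block)
{
	BUG_ON(!PageLocked(page));
	return __block_write_begin(page, pos, len, get_block);
}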
@@ -1960,58 +1958,27 @@ static int __block_commit_write(struct inode *inode, struct page *page,
* block_write_begin takes care of the basic task of block allocation and
* bringing partial write blocks uptodate first.
*
- * If *pagep is not NULL, then block_write_begin uses the locked page
- * at *pagep rather than allocating its own. In this case, the page will
- * not be unlocked or deallocated on failure.
+ * The filesystem needs to handle block truncation upon failure.
*/
-int block_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block)
+int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
+ unsigned flags, struct page **pagep, get_block_t *get_block)
{
- struct inode *inode = mapping->host;
- int status = 0;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct page *page;
- pgoff_t index;
- unsigned start, end;
- int ownpage = 0;
+ int status;
- index = pos >> PAGE_CACHE_SHIFT;
- start = pos & (PAGE_CACHE_SIZE - 1);
- end = start + len;
-
- page = *pagep;
- if (page == NULL) {
- ownpage = 1;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page) {
- status = -ENOMEM;
- goto out;
- }
- *pagep = page;
- } else
- BUG_ON(!PageLocked(page));
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
- status = __block_prepare_write(inode, page, start, end, get_block);
+ status = __block_write_begin(page, pos, len, get_block);
if (unlikely(status)) {
- ClearPageUptodate(page);
-
- if (ownpage) {
- unlock_page(page);
- page_cache_release(page);
- *pagep = NULL;
-
- /*
- * prepare_write() may have instantiated a few blocks
- * outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
- */
- if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
- }
+ unlock_page(page);
+ page_cache_release(page);
+ page = NULL;
}
-out:
+ *pagep = page;
return status;
}
EXPORT_SYMBOL(block_write_begin);
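With the new calling convention a filesystem's .write_begin no longer primes *pagep or unwinds the page on error, but it does inherit the job of trimming blocks instantiated beyond i_size when the call fails. A sketch under those assumptions (the myfs_* names are placeholders; the vmtruncate() cleanup mirrors what the old block_write_begin() did internally):

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				myfs_get_block);
	if (unlikely(ret)) {
		struct inode *inode = mapping->host;

		/* block_write_begin() may have instantiated blocks past
		 * i_size before failing; trim them off again. */
		if (pos + len > inode->i_size)
			vmtruncate(inode, inode->i_size);
	}
	return ret;
}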
@@ -2344,7 +2311,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
err = cont_expand_zero(file, mapping, pos, bytes);
if (err)
- goto out;
+ return err;
zerofrom = *bytes & ~PAGE_CACHE_MASK;
if (pos+len > *bytes && zerofrom & (blocksize-1)) {
@@ -2352,25 +2319,10 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
(*bytes)++;
}
- *pagep = NULL;
- err = block_write_begin(file, mapping, pos, len,
- flags, pagep, fsdata, get_block);
-out:
- return err;
+ return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
-int block_prepare_write(struct page *page, unsigned from, unsigned to,
- get_block_t *get_block)
-{
- struct inode *inode = page->mapping->host;
- int err = __block_prepare_write(inode, page, from, to, get_block);
- if (err)
- ClearPageUptodate(page);
- return err;
-}
-EXPORT_SYMBOL(block_prepare_write);
-
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
@@ -2389,7 +2341,7 @@ EXPORT_SYMBOL(block_commit_write);
*
* We are not allowed to take the i_mutex here so we have to play games to
* protect against truncate races as the page could now be beyond EOF. Because
- * vmtruncate() writes the inode size before removing pages, once we have the
+ * truncate writes the inode size before removing pages, once we have the
* page lock we can determine safely if the page is beyond EOF. If it is not
* beyond EOF, then the page is guaranteed safe against truncation until we
* unlock the page.
@@ -2419,7 +2371,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
else
end = PAGE_CACHE_SIZE;
- ret = block_prepare_write(page, 0, end, get_block);
+ ret = __block_write_begin(page, 0, end, get_block);
if (!ret)
ret = block_commit_write(page, 0, end);
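Callers of the removed block_prepare_write(), like block_page_mkwrite() here, convert 1:1 to __block_write_begin() because the page is already locked at that point; the pairing with block_commit_write() is unchanged. A condensed sketch of the pattern (touch_whole_page() is an illustrative name; the i_size recheck a real caller needs is elided):

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

static int touch_whole_page(struct page *page, unsigned end,
			    get_block_t *get_block)
{
	int ret;

	lock_page(page);
	/* a real caller must recheck that the page is within i_size here */
	ret = __block_write_begin(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);
	unlock_page(page);
	return ret;
}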
@@ -2474,8 +2426,9 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
/*
* On entry, the page is fully not uptodate.
* On exit the page is fully uptodate in the areas outside (from,to)
+ * The filesystem needs to handle block truncation upon failure.
*/
-int nobh_write_begin(struct file *file, struct address_space *mapping,
+int nobh_write_begin(struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
get_block_t *get_block)
@@ -2505,11 +2458,10 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
*fsdata = NULL;
if (page_has_buffers(page)) {
- unlock_page(page);
- page_cache_release(page);
- *pagep = NULL;
- return block_write_begin(file, mapping, pos, len, flags, pagep,
- fsdata, get_block);
+ ret = __block_write_begin(page, pos, len, get_block);
+ if (unlikely(ret))
+ goto out_release;
+ return ret;
}
if (PageMappedToDisk(page))
@@ -2613,9 +2565,6 @@ out_release:
page_cache_release(page);
*pagep = NULL;
- if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
-
return ret;
}
EXPORT_SYMBOL(nobh_write_begin);
@@ -2893,7 +2842,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
/*
* The page straddles i_size. It must be zeroed out on each and every
- * writepage invokation because it may be mmapped. "A file is mapped
+ * writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
@@ -2933,7 +2882,6 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- set_bit(BH_Eopnotsupp, &bh->b_state);
}
if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
@@ -2955,13 +2903,6 @@ int submit_bh(int rw, struct buffer_head * bh)
BUG_ON(buffer_unwritten(bh));
/*
- * Mask in barrier bit for a write (could be either a WRITE or a
- * WRITE_SYNC
- */
- if (buffer_ordered(bh) && (rw & WRITE))
- rw |= WRITE_BARRIER;
-
- /*
* Only clear out a write error when rewriting
*/
if (test_set_buffer_req(bh) && (rw & WRITE))
@@ -2999,22 +2940,21 @@ EXPORT_SYMBOL(submit_bh);
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
* ll_rw_block() takes an array of pointers to &struct buffer_heads, and
* requests an I/O operation on them, either a %READ or a %WRITE. The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
*
* This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request. Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing a read
+ * request. Further it marks as clean the buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
*
* ll_rw_block sets b_end_io to a simple completion handler that marks
* the buffer up-to-date (if appropriate), unlocks the buffer and wakes
@@ -3030,20 +2970,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
- if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
- lock_buffer(bh);
- else if (!trylock_buffer(bh))
+ if (!trylock_buffer(bh))
continue;
-
- if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
- rw == SWRITE_SYNC_PLUG) {
+ if (rw == WRITE) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- if (rw == SWRITE_SYNC)
- submit_bh(WRITE_SYNC, bh);
- else
- submit_bh(WRITE, bh);
+ submit_bh(WRITE, bh);
continue;
}
} else {
@@ -3059,12 +2992,25 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
}
EXPORT_SYMBOL(ll_rw_block);
+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+ lock_buffer(bh);
+ if (!test_clear_buffer_dirty(bh)) {
+ unlock_buffer(bh);
+ return;
+ }
+ bh->b_end_io = end_buffer_write_sync;
+ get_bh(bh);
+ submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
+
/*
* For a data-integrity writeout, we need to wait upon any in-progress I/O
* and then start new I/O and then wait upon it. The caller must have a ref on
* the buffer_head.
*/
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
{
int ret = 0;
@@ -3073,12 +3019,8 @@ int sync_dirty_buffer(struct buffer_head *bh)
if (test_clear_buffer_dirty(bh)) {
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- ret = submit_bh(WRITE_SYNC, bh);
+ ret = submit_bh(rw, bh);
wait_on_buffer(bh);
- if (buffer_eopnotsupp(bh)) {
- clear_buffer_eopnotsupp(bh);
- ret = -EOPNOTSUPP;
- }
if (!ret && !buffer_uptodate(bh))
ret = -EIO;
} else {
@@ -3086,6 +3028,12 @@ int sync_dirty_buffer(struct buffer_head *bh)
}
return ret;
}
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+ return __sync_dirty_buffer(bh, WRITE_SYNC);
+}
EXPORT_SYMBOL(sync_dirty_buffer);
/*
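write_dirty_buffer() replaces the removed SWRITE* modes of ll_rw_block(): it always waits for the buffer lock, so it writes the buffer's current contents instead of silently skipping a busy buffer, and __sync_dirty_buffer() additionally lets callers choose the write flags. A sketch of a journaling-style batch, assuming the caller holds its own references on the buffers (write_and_wait() is illustrative):

#include <linux/buffer_head.h>

/* Illustrative: submit a batch of dirty buffers asynchronously, then
 * wait for completion - roughly what ll_rw_block(SWRITE, ...) plus a
 * wait loop used to do. */
static int write_and_wait(struct buffer_head **bhs, int nr)
{
	int i, err = 0;

	for (i = 0; i < nr; i++)
		write_dirty_buffer(bhs[i], WRITE);	/* async submit */

	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}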
@@ -3265,7 +3213,7 @@ static void recalc_bh_state(void)
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
- struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+ struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
get_cpu_var(bh_accounting).nr++;
@@ -3352,15 +3300,6 @@ int bh_submit_read(struct buffer_head *bh)
}
EXPORT_SYMBOL(bh_submit_read);
-static void
-init_buffer_head(void *data)
-{
- struct buffer_head *bh = data;
-
- memset(bh, 0, sizeof(*bh));
- INIT_LIST_HEAD(&bh->b_assoc_buffers);
-}
-
void __init buffer_init(void)
{
int nrpages;
@@ -3369,7 +3308,7 @@ void __init buffer_init(void)
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
- init_buffer_head);
+ NULL);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
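Dropping init_buffer_head() is safe because a slab constructor runs only when a slab page is first populated, not on every allocation, so a recycled object keeps whatever state its previous user left behind. kmem_cache_zalloc() instead hands back a zeroed buffer_head on every allocation, which is also why the explicit bh->b_private = NULL in the earlier hunk became redundant. A minimal sketch of the same pattern for a hypothetical cache:

#include <linux/slab.h>
#include <linux/list.h>

struct widget {				/* hypothetical object type */
	struct list_head list;
	void *private;
};

static struct kmem_cache *widget_cachep;

static int __init widget_init(void)
{
	widget_cachep = kmem_cache_create("widget", sizeof(struct widget),
					  0, 0, NULL);	/* no constructor */
	return widget_cachep ? 0 : -ENOMEM;
}

static struct widget *alloc_widget(gfp_t gfp)
{
	/* zeroed on every allocation, so no stale state survives reuse */
	struct widget *w = kmem_cache_zalloc(widget_cachep, gfp);

	if (w)
		INIT_LIST_HEAD(&w->list);
	return w;
}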