Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	57
1 file changed, 36 insertions(+), 21 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3ebccf4aa7e..3db4a26adc4 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,16 +360,19 @@ void invalidate_bdev(struct block_device *bdev)
  */
 static void free_more_memory(void)
 {
-	struct zone **zones;
-	pg_data_t *pgdat;
+	struct zone *zone;
+	int nid;
 
 	wakeup_pdflush(1024);
 	yield();
 
-	for_each_online_pgdat(pgdat) {
-		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
-		if (*zones)
-			try_to_free_pages(zones, 0, GFP_NOFS);
+	for_each_online_node(nid) {
+		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+						gfp_zone(GFP_NOFS), NULL,
+						&zone);
+		if (zone)
+			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
+						GFP_NOFS);
 	}
 }
 
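The hunk above replaces the per-pgdat zonelist walk with a walk over online node IDs: first_zones_zonelist() probes each node's GFP_NOFS zonelist for the first eligible zone, and try_to_free_pages() runs only when such a zone exists. Below is a minimal userspace sketch of that probe-then-act pattern; node, first_usable_zone and reclaim_node are hypothetical stand-ins, not the kernel APIs.

#include <stdio.h>
#include <stddef.h>

#define MAX_NODES 4
#define MAX_ZONES 3

struct zone { int usable; };

struct node {
	int online;
	struct zone zones[MAX_ZONES];	/* the node's "zonelist" */
};

static struct node nodes[MAX_NODES] = {
	{ 1, { { 0 }, { 1 }, { 1 } } },
	{ 0, { { 0 }, { 0 }, { 0 } } },	/* offline: skipped */
	{ 1, { { 0 }, { 0 }, { 0 } } },	/* no usable zone: skipped */
	{ 1, { { 1 }, { 1 }, { 1 } } },
};

/* rough analogue of first_zones_zonelist(): first zone passing the filter */
static struct zone *first_usable_zone(struct node *n)
{
	int i;

	for (i = 0; i < MAX_ZONES; i++)
		if (n->zones[i].usable)
			return &n->zones[i];
	return NULL;
}

/* rough analogue of try_to_free_pages() */
static void reclaim_node(int nid)
{
	printf("reclaiming on node %d\n", nid);
}

int main(void)
{
	int nid;

	for (nid = 0; nid < MAX_NODES; nid++) {	/* for_each_online_node() */
		if (!nodes[nid].online)
			continue;
		if (first_usable_zone(&nodes[nid]))
			reclaim_node(nid);
	}
	return 0;
}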
@@ -627,8 +630,7 @@ repeat:
 }
 
 /**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- * buffers
+ * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
@@ -836,7 +838,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 		smp_mb();
 		if (buffer_dirty(bh)) {
 			list_add(&bh->b_assoc_buffers,
-				 &bh->b_assoc_map->private_list);
+				 &mapping->private_list);
 			bh->b_assoc_map = mapping;
 		}
 		spin_unlock(lock);
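This one-liner stops re-dereferencing bh->b_assoc_map, which another CPU can retarget or clear while the buffer is shuffled between lists, and instead uses the mapping the caller already validated and still holds. A toy sketch of the underlying rule, prefer a stable local over a re-read of shared state, using hypothetical types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

struct list { const char *name; };

struct buffer {
	struct list *assoc_map;	/* shared; may be rewritten concurrently */
};

/* Racy: re-reads the shared pointer at use time, so a concurrent
 * update between check and use can redirect (or NULL) the target. */
static struct list *target_racy(struct buffer *b)
{
	return b->assoc_map;
}

/* Stable: uses the value the caller validated and still holds. */
static struct list *target_stable(struct list *mapping)
{
	return mapping;
}

int main(void)
{
	struct list private = { "mapping->private_list" };
	struct buffer bh = { &private };
	struct list *mapping = &private;	/* caller's stable reference */

	bh.assoc_map = NULL;	/* simulate a concurrent clear */

	printf("racy  : %p\n", (void *)target_racy(&bh));
	printf("stable: %s\n", target_stable(mapping)->name);
	return 0;
}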
@@ -1182,7 +1184,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 void mark_buffer_dirty(struct buffer_head *bh)
 {
 	WARN_ON_ONCE(!buffer_uptodate(bh));
-	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
+
+	/*
+	 * Very *carefully* optimize the it-is-already-dirty case.
+	 *
+	 * Don't let the final "is it dirty" escape to before we
+	 * perhaps modified the buffer.
+	 */
+	if (buffer_dirty(bh)) {
+		smp_mb();
+		if (buffer_dirty(bh))
+			return;
+	}
+
+	if (!test_set_buffer_dirty(bh))
 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
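The new fast path reads the dirty bit twice around a full barrier: the first test skips the costly locked test-and-set when the buffer already looks dirty, and the smp_mb() keeps the confirming re-read from being reordered before the caller's preceding modification of the buffer contents. Here is a hedged userspace analogue using C11 atomics; buf, test_set_dirty and mark_dirty are illustrative names, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
	atomic_bool dirty;
	int data;
};

/* analogue of test_set_buffer_dirty(): atomically set, return old value */
static bool test_set_dirty(struct buf *b)
{
	return atomic_exchange(&b->dirty, true);
}

static void mark_dirty(struct buf *b)
{
	/*
	 * Already-dirty fast path: no atomic RMW needed, but the full
	 * fence (the smp_mb() analogue) stops the confirming re-read
	 * from moving before the caller's earlier store to b->data.
	 */
	if (atomic_load_explicit(&b->dirty, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load_explicit(&b->dirty, memory_order_relaxed))
			return;
	}

	if (!test_set_dirty(b))
		printf("first dirtier: queue buffer for writeback\n");
}

int main(void)
{
	struct buf b = { .data = 0 };

	atomic_init(&b.dirty, false);
	b.data = 42;		/* modify the buffer ... */
	mark_dirty(&b);		/* ... slow path: sets the bit */
	mark_dirty(&b);		/* fast path: already dirty, returns early */
	return 0;
}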
@@ -2231,6 +2246,8 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
 			goto out;
 		BUG_ON(err != len);
 		err = 0;
+
+		balance_dirty_pages_ratelimited(mapping);
 	}
 
 	/* page covers the boundary, find the boundary offset */
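The added call throttles the writer once per zeroed page, so expanding a file across a large hole cannot dirty an unbounded amount of page cache before writeback catches up. A rough sketch of the ratelimiting idea; RATELIMIT, nr_dirtied and balance_dirty_ratelimited are hypothetical names chosen only to mirror the shape of the kernel helper:

#include <stdio.h>

#define RATELIMIT 32	/* hypothetical: do real balancing every N pages */

static unsigned long nr_dirtied;

/* cheap common path; heavyweight balancing only every RATELIMIT calls */
static void balance_dirty_ratelimited(void)
{
	if (++nr_dirtied % RATELIMIT)
		return;
	/* the real kernel may block the writer here until writeback catches up */
	printf("throttling writer after %lu dirtied pages\n", nr_dirtied);
}

int main(void)
{
	int page;

	/* zero a large hole one page at a time, as cont_expand_zero() does */
	for (page = 0; page < 100; page++) {
		/* ... write_begin(), zero the page, write_end() ... */
		balance_dirty_ratelimited();
	}
	return 0;
}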
@@ -2565,14 +2582,13 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
 	struct inode *inode = page->mapping->host;
 	struct buffer_head *head = fsdata;
 	struct buffer_head *bh;
+	BUG_ON(fsdata != NULL && page_has_buffers(page));
 
-	if (!PageMappedToDisk(page)) {
-		if (unlikely(copied < len) && !page_has_buffers(page))
-			attach_nobh_buffers(page, head);
-		if (page_has_buffers(page))
-			return generic_write_end(file, mapping, pos, len,
-					copied, page, fsdata);
-	}
+	if (unlikely(copied < len) && !page_has_buffers(page))
+		attach_nobh_buffers(page, head);
+	if (page_has_buffers(page))
+		return generic_write_end(file, mapping, pos, len,
+					copied, page, fsdata);
 
 	SetPageUptodate(page);
 	set_page_dirty(page);
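Here the PageMappedToDisk() special case is replaced by a single invariant asserted up front: if write_begin passed buffers in through fsdata, the page cannot already have buffers of its own, which lets the rest of the function handle one flattened code path. A small sketch of encoding such an invariant as an assertion; struct page and write_end below are simplified stand-ins:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct page { bool has_buffers; };

/* If the caller handed us buffers via fsdata, the page must not
 * already carry its own; asserting this up front lets the body
 * drop the old special case and handle one state machine. */
static void write_end(struct page *page, void *fsdata)
{
	assert(!(fsdata != NULL && page->has_buffers));
	/* ... attach buffers / mark the page dirty ... */
}

int main(void)
{
	struct page p = { .has_buffers = false };
	int buffers;

	write_end(&p, &buffers);	/* ok: fsdata set, no page buffers */
	write_end(&p, NULL);		/* ok: no fsdata */
	return 0;
}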
@@ -3169,8 +3185,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-	struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
-			set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		get_cpu_var(bh_accounting).nr++;
@@ -3214,7 +3229,7 @@ static int buffer_cpu_notify(struct notifier_block *self,
 }
 
 /**
- * bh_uptodate_or_lock: Test whether the buffer is uptodate
+ * bh_uptodate_or_lock - Test whether the buffer is uptodate
  * @bh: struct buffer_head
  *
  * Return true if the buffer is up-to-date and false,
@@ -3233,7 +3248,7 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
 EXPORT_SYMBOL(bh_uptodate_or_lock);
 
 /**
- * bh_submit_read: Submit a locked buffer for reading
+ * bh_submit_read - Submit a locked buffer for reading
  * @bh: struct buffer_head
  *
  * Returns zero on success and -EIO on error.
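The three documentation hunks in this diff all correct the same kernel-doc convention: the summary line is "name - short description" with a dash, not a colon, and should fit on one line. For reference, a canonical kernel-doc header looks like the following; foo_frobnicate and its parameters are a made-up example:

/**
 * foo_frobnicate - frobnicate the given foo
 * @foo: the object to frobnicate
 * @flags: FOO_* behaviour flags
 *
 * Longer description of what frobnication does and when to call it.
 *
 * Returns zero on success or a negative errno on failure.
 */
int foo_frobnicate(struct foo *foo, unsigned int flags);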