From 7d4fb40ad7efe4586d1341d4731377fb4530836f Mon Sep 17 00:00:00 2001
From: Nathan Scott
Date: Fri, 9 Jun 2006 15:27:16 +1000
Subject: [XFS] Start writeout earlier (on last close) in the case where we
 have a truncate down followed by delayed allocation (buffered writes) -
 worst case scenario for the notorious NULL files problem.  This reduces the
 window where we are exposed to that problem significantly.

SGI-PV: 917976
SGI-Modid: xfs-linux-melb:xfs-kern:26100a

Signed-off-by: Nathan Scott
---
 fs/xfs/linux-2.6/xfs_aops.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 5835e699a7f..c0a90431685 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1157,6 +1157,18 @@ out_unlock:
 	return error;
 }
 
+STATIC int
+xfs_vm_writepages(
+	struct address_space	*mapping,
+	struct writeback_control *wbc)
+{
+	struct vnode		*vp = vn_from_inode(mapping->host);
+
+	if (VN_TRUNC(vp))
+		VUNTRUNCATE(vp);
+	return generic_writepages(mapping, wbc);
+}
+
 /*
  * Called to move a page into cleanable state - and from there
  * to be released. Possibly the page is already clean. We always
@@ -1451,6 +1463,7 @@ struct address_space_operations xfs_address_space_operations = {
 	.readpage		= xfs_vm_readpage,
 	.readpages		= xfs_vm_readpages,
 	.writepage		= xfs_vm_writepage,
+	.writepages		= xfs_vm_writepages,
 	.sync_page		= block_sync_page,
 	.releasepage		= xfs_vm_releasepage,
 	.invalidatepage		= xfs_vm_invalidatepage,
-- 
cgit v1.2.3-70-g09d2
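
For context only, a minimal userspace sketch of the worst-case sequence the
commit message describes: a truncate down, then a buffered write that leaves
delayed-allocation data, then the final close, after which writeout now
starts earlier.  This is not part of the patch; the mount point, file name,
and buffer size are made up for illustration.

	/*
	 * Hypothetical reproducer of the truncate-down + buffered-write
	 * + last-close sequence (path and sizes are illustrative only).
	 */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char	buf[4096];
		int	fd;

		fd = open("/mnt/xfs/testfile", O_WRONLY);	/* pre-existing file */
		if (fd < 0)
			return 1;
		if (ftruncate(fd, 0) < 0)		/* truncate down */
			return 1;
		memset(buf, 'x', sizeof(buf));
		if (write(fd, buf, sizeof(buf)) < 0)	/* buffered write -> delalloc */
			return 1;
		close(fd);	/* last close: per the commit message, writeout
				 * of the dirty data now begins here, narrowing
				 * the NULL-files exposure window */
		return 0;
	}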