author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-17 09:04:11 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-17 09:04:11 -0700
commit	347c53dca73fca317d57781f510f5ff4f6c0d0d7 (patch)
tree	cdc405ac049751da4d76085ce58750b6b2a22326	/fs/xfs/linux-2.6/xfs_buf.c
parent	5c8e191e8437616a498a8e1cc0af3dd0d32bbff2 (diff)
parent	7f015072348a14f16d548be557ee58c5c55df0aa (diff)
Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6
* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (59 commits)
  [XFS] eagerly remove vmap mappings to avoid upsetting Xen
  [XFS] simplify validata_fields
  [XFS] no longer using io_vnode, as was remaining from 23 cherrypick
  [XFS] Remove STATIC which was missing from prior manual merge
  [XFS] Put back the QUEUE_ORDERED_NONE test in the barrier check.
  [XFS] Turn off XBF_ASYNC flag before re-reading superblock.
  [XFS] avoid race in sync_inodes() that can fail to write out all dirty data
  [XFS] This fix prevents bulkstat from spinning in an infinite loop.
  [XFS] simplify xfs_create/mknod/symlink prototype
  [XFS] avoid xfs_getattr in XFS_IOC_FSGETXATTR ioctl
  [XFS] get_bulkall() could return incorrect inode state
  [XFS] Kill unused IOMAP_EOF flag
  [XFS] fix when DMAPI mount option processing happens
  [XFS] ensure file size is logged on synchronous writes
  [XFS] growlock should be a mutex
  [XFS] replace some large xfs_log_priv.h macros by proper functions
  [XFS] kill struct bhv_vfs
  [XFS] move syncing related members from struct bhv_vfs to struct xfs_mount
  [XFS] kill the vfs_flags member in struct bhv_vfs
  [XFS] kill the vfs_fsid and vfs_altfsid members in struct bhv_vfs
  ...
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 39f44ee572e..b9c8589e05c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -187,6 +187,19 @@ free_address(
 {
 	a_list_t	*aentry;
 
+#ifdef CONFIG_XEN
+	/*
+	 * Xen needs to be able to make sure it can get an exclusive
+	 * RO mapping of pages it wants to turn into a pagetable.  If
+	 * a newly allocated page is also still being vmap()ed by xfs,
+	 * it will cause pagetable construction to fail.  This is a
+	 * quick workaround to always eagerly unmap pages so that Xen
+	 * is happy.
+	 */
+	vunmap(addr);
+	return;
+#endif
+
 	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
 	if (likely(aentry)) {
 		spin_lock(&as_lock);
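For context, when CONFIG_XEN is not set free_address() does not unmap right away: it queues the address on the spinlock-protected free list touched above so that many mappings can be torn down in one batch later. A minimal kernel-style sketch of that deferred-purge pattern follows; the as_free_head/as_list_len globals, the drain routine's name (purge_addresses()), and the kfree() of each node are assumptions based on this file's conventions, not part of the hunk shown.

/* Sketch of the deferred-unmap path used when CONFIG_XEN is not set. */
typedef struct a_list {
	void		*vm_addr;	/* vmap()ed address waiting to be unmapped */
	struct a_list	*next;
} a_list_t;

static a_list_t		*as_free_head;
static int		as_list_len;	/* used elsewhere to decide when to purge */
static DEFINE_SPINLOCK(as_lock);

/* Drain the free list and vunmap() every queued address in one pass. */
static void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
		return;

	/* Detach the whole list under the lock, then unmap outside it. */
	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}

Under Xen the change above short-circuits this path entirely, trading the batching win for the guarantee that no stale vmap() alias exists when Xen validates a newly allocated page as a pagetable.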
@@ -997,7 +1010,18 @@ xfs_buf_iodone_work(
 	xfs_buf_t		*bp =
 		container_of(work, xfs_buf_t, b_iodone_work);
 
-	if (bp->b_iodone)
+	/*
+	 * We can get an EOPNOTSUPP to ordered writes.  Here we clear the
+	 * ordered flag and reissue them.  Because we can't tell the higher
+	 * layers directly that they should not issue ordered I/O anymore, they
+	 * need to check if the ordered flag was cleared during I/O completion.
+	 */
+	if ((bp->b_error == EOPNOTSUPP) &&
+	    (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
+		XB_TRACE(bp, "ordered_retry", bp->b_iodone);
+		bp->b_flags &= ~XBF_ORDERED;
+		xfs_buf_iorequest(bp);
+	} else if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
 	else if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
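The new comment says the higher layers must notice for themselves that the ordered flag was cleared during completion. A hedged sketch of what such a check could look like in a caller's I/O completion path follows; the helper name, the xfs_mount_t parameter, and the XFS_MOUNT_BARRIER flag are illustrative assumptions, not taken from this hunk.

/*
 * Illustrative sketch (not part of this diff): a completion handler higher
 * up the stack notices that XBF_ORDERED was stripped by the retry logic in
 * xfs_buf_iodone_work() and stops issuing barrier writes for the mount.
 */
static void
xfs_check_barrier_support(	/* hypothetical helper */
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    !(bp->b_flags & XBF_ORDERED)) {
		/* The write completed, but only after the barrier was dropped. */
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		cmn_err(CE_WARN,
			"XFS: disabling barriers, not supported by the underlying device");
	}
}

Because xfs_buf_iodone_work() reissues the buffer without XBF_ORDERED rather than failing it, the missing flag on the completed buffer is the only signal the caller gets.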