Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
 fs/xfs/linux-2.6/xfs_buf.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 9cc8f021309..36d5fcd3f59 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -58,7 +58,7 @@ xfs_buf_trace(
 		bp, id,
 		(void *)(unsigned long)bp->b_flags,
 		(void *)(unsigned long)bp->b_hold.counter,
-		(void *)(unsigned long)bp->b_sema.count.counter,
+		(void *)(unsigned long)bp->b_sema.count,
 		(void *)current,
 		data, ra,
 		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
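
The hunk above tracks the generic semaphore rework merged in 2.6.26: struct semaphore no longer wraps an atomic_t, so bp->b_sema.count.counter stopped compiling and the plain count field is traced instead. For reference, a simplified sketch of the post-rework layout (based on include/linux/semaphore.h of that era; treat details beyond the count field as an approximation):

/* Sketch only, not part of this diff: the generic struct semaphore
 * after the 2.6.26 rework.  "count" is a plain unsigned int, so the
 * old ->count.counter access has no equivalent. */
struct semaphore {
	spinlock_t		lock;
	unsigned int		count;
	struct list_head	wait_list;
};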
@@ -253,7 +253,7 @@ _xfs_buf_initialize(
 
 	memset(bp, 0, sizeof(xfs_buf_t));
 	atomic_set(&bp->b_hold, 1);
-	init_MUTEX_LOCKED(&bp->b_iodonesema);
+	init_completion(&bp->b_iowait);
 	INIT_LIST_HEAD(&bp->b_list);
 	INIT_LIST_HEAD(&bp->b_hash_list);
 	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
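
This hunk, together with the xfs_buf_ioend() and xfs_buf_iowait() hunks further down, converts the iodone signal from a semaphore created locked into a completion, the idiomatic primitive for one-shot "wait until this event" signalling. A minimal standalone sketch of the pattern (struct demo_buf and the demo_* helpers are illustrative stand-ins, not XFS code):

#include <linux/completion.h>

/* Illustrative sketch of the semaphore-to-completion conversion. */
struct demo_buf {
	struct completion	b_iowait;	/* was: struct semaphore b_iodonesema */
	int			b_error;
};

static void demo_init(struct demo_buf *bp)
{
	init_completion(&bp->b_iowait);		/* starts "not done", like init_MUTEX_LOCKED() */
}

static void demo_ioend(struct demo_buf *bp)	/* I/O completion side */
{
	complete(&bp->b_iowait);		/* was: up(&bp->b_iodonesema) */
}

static int demo_iowait(struct demo_buf *bp)	/* waiting side */
{
	wait_for_completion(&bp->b_iowait);	/* was: down(&bp->b_iodonesema) */
	return bp->b_error;
}

A semaphore that is initialized locked and only ever up()'d by the completion path is really an event flag; the completion API states that intent directly instead of pressing a semaphore into signalling duty.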
@@ -838,6 +838,7 @@ xfs_buf_rele(
 		return;
 	}
 
+	ASSERT(atomic_read(&bp->b_hold) > 0);
 	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
 		if (bp->b_relse) {
 			atomic_inc(&bp->b_hold);
@@ -851,11 +852,6 @@ xfs_buf_rele(
 			spin_unlock(&hash->bh_lock);
 			xfs_buf_free(bp);
 		}
-	} else {
-		/*
-		 * Catch reference count leaks
-		 */
-		ASSERT(atomic_read(&bp->b_hold) >= 0);
 	}
 }
 
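The two xfs_buf_rele() hunks move the reference-leak check ahead of the drop: asserting b_hold > 0 before atomic_dec_and_lock() trips before the count can underflow, whereas the old >= 0 check ran after the decrement and could only catch an underflow after the fact. A hedged sketch of the resulting release pattern (demo types; teardown details elided; the diff uses XFS's ASSERT where this uses BUG_ON):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative only: atomic_dec_and_lock() decrements and returns true,
 * with the lock held, only when the count reaches zero. */
struct demo_obj {
	atomic_t		refs;
	spinlock_t		*hash_lock;	/* protects the hash list */
	struct list_head	hash_node;
};

static void demo_rele(struct demo_obj *obj)
{
	BUG_ON(atomic_read(&obj->refs) <= 0);	/* catch leaks before underflow */
	if (atomic_dec_and_lock(&obj->refs, obj->hash_lock)) {
		list_del(&obj->hash_node);	/* unhash under the hash lock */
		spin_unlock(obj->hash_lock);
		kfree(obj);			/* no references remain */
	}
}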
@@ -1005,12 +1001,13 @@ xfs_buf_iodone_work(
 	 * We can get an EOPNOTSUPP to ordered writes. Here we clear the
 	 * ordered flag and reissue them. Because we can't tell the higher
 	 * layers directly that they should not issue ordered I/O anymore, they
-	 * need to check if the ordered flag was cleared during I/O completion.
+	 * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
 	 */
 	if ((bp->b_error == EOPNOTSUPP) &&
 	    (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
 		XB_TRACE(bp, "ordered_retry", bp->b_iodone);
 		bp->b_flags &= ~XBF_ORDERED;
+		bp->b_flags |= _XFS_BARRIER_FAILED;
 		xfs_buf_iorequest(bp);
 	} else if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
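
This hunk changes how a failed barrier write is reported upward: the buffer is still retried without XBF_ORDERED, but the new _XFS_BARRIER_FAILED flag records the failure explicitly instead of making callers infer it from a cleared ordered flag that may be reset again before they look. A rough sketch of the retry decision (flag values and the demo_submit() helper are illustrative; the real flags live in xfs_buf.h):

#include <linux/errno.h>

/* Illustrative flags mirroring the hunk above. */
#define DEMO_ORDERED		(1 << 0)	/* barrier write requested */
#define DEMO_ASYNC		(1 << 1)	/* asynchronous submission */
#define DEMO_BARRIER_FAILED	(1 << 2)	/* device rejected the barrier */

/* Assumes the demo_buf sketch above, extended with: unsigned int b_flags; */
static void demo_submit(struct demo_buf *bp);	/* hypothetical resubmission hook */

static void demo_iodone_work(struct demo_buf *bp)
{
	if (bp->b_error == EOPNOTSUPP &&
	    (bp->b_flags & (DEMO_ORDERED | DEMO_ASYNC)) ==
			   (DEMO_ORDERED | DEMO_ASYNC)) {
		bp->b_flags &= ~DEMO_ORDERED;		/* retry as a plain write */
		bp->b_flags |= DEMO_BARRIER_FAILED;	/* let upper layers notice */
		bp->b_error = 0;
		demo_submit(bp);			/* reissue the I/O */
		return;
	}
	demo_ioend(bp);					/* normal completion path */
}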
@@ -1037,7 +1034,7 @@ xfs_buf_ioend(
 			xfs_buf_iodone_work(&bp->b_iodone_work);
 		}
 	} else {
-		up(&bp->b_iodonesema);
+		complete(&bp->b_iowait);
 	}
 }
 
@@ -1275,7 +1272,7 @@ xfs_buf_iowait(
XB_TRACE(bp, "iowait", 0);
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
- down(&bp->b_iodonesema);
+ wait_for_completion(&bp->b_iowait);
XB_TRACE(bp, "iowaited", (long)bp->b_error);
return bp->b_error;
}
@@ -1799,7 +1796,7 @@ int __init
 xfs_buf_init(void)
 {
 #ifdef XFS_BUF_TRACE
-	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
+	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
 #endif
 
 	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
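
The last hunk switches the trace-buffer allocation from KM_SLEEP to KM_NOFS, so that if the allocation falls into memory reclaim it cannot recurse back into filesystem code (the usual GFP_KERNEL versus GFP_NOFS distinction, expressed through the XFS kmem wrappers). For orientation, a sketch of the flags involved, as defined in fs/xfs/kmem.h of this era (values and GFP mappings are approximate and should be checked against the header; kmem_flags_convert() does the real translation):

/* Sketch: XFS kmem flags and rough GFP equivalents. */
#define KM_SLEEP	0x0001u		/* may block; ~GFP_KERNEL */
#define KM_NOSLEEP	0x0002u		/* must not block; ~GFP_ATOMIC */
#define KM_NOFS		0x0004u		/* may block, no FS recursion; ~GFP_NOFS */
#define KM_MAYFAIL	0x0008u		/* allocation may fail instead of looping */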