From 09a423a3d6c70905f1090f01aadb8e6abff527ce Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 20 Feb 2012 02:31:20 +0000
Subject: xfs: split tail_lsn assignments from log space wakeups

Currently xfs_log_move_tail has a tail_lsn argument that is horribly
overloaded: it may contain either an actual lsn to assign to the log
tail, 0 as a special case to use the last sync LSN, or 1 to indicate
that no tail LSN assignment should be performed, and we should
opportunistically wake up one task waiting for log space even if we
did not move the LSN.

Remove the tail lsn assignment from xfs_log_move_tail and make the two
callers use xlog_assign_tail_lsn instead of the current variant of
partially using the code in xfs_log_move_tail and partially opencoding
it.  Note that this means we grow an additional lock roundtrip on the
AIL lock for each bulk update or delete, which is still far less than
what we had before introducing the bulk operations.  If this proves to
be a problem we can still add a variant of xlog_assign_tail_lsn that
expects the lock to be held already.

Also rename the remainder of xfs_log_move_tail to xfs_log_space_wake as
that name describes its functionality much better.

Reviewed-by: Mark Tinguely
Signed-off-by: Christoph Hellwig
Reviewed-by: Dave Chinner
Signed-off-by: Ben Myers
---
 fs/xfs/xfs_trans_ail.c | 45 +++++++++++----------------------------------
 1 file changed, 11 insertions(+), 34 deletions(-)

(limited to 'fs/xfs/xfs_trans_ail.c')

diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index ed9252bcdac..c9234956bcb 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -643,15 +643,15 @@ xfs_trans_unlocked_item(
 	 * at the tail, it doesn't matter what result we get back. This
 	 * is slightly racy because since we were just unlocked, we could
 	 * go to sleep between the call to xfs_ail_min and the call to
-	 * xfs_log_move_tail, have someone else lock us, commit to us disk,
+	 * xfs_log_space_wake, have someone else lock us, commit to us disk,
 	 * move us out of the tail of the AIL, and then we wake up. However,
-	 * the call to xfs_log_move_tail() doesn't do anything if there's
+	 * the call to xfs_log_space_wake() doesn't do anything if there's
 	 * not enough free space to wake people up so we're safe calling it.
 	 */
 	min_lip = xfs_ail_min(ailp);
 
 	if (min_lip == lip)
-		xfs_log_move_tail(ailp->xa_mount, 1);
+		xfs_log_space_wake(ailp->xa_mount, true);
 }	/* xfs_trans_unlocked_item */
 
 /*
@@ -685,7 +685,6 @@ xfs_trans_ail_update_bulk(
 	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
 {
 	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
 	int			mlip_changed = 0;
 	int			i;
 	LIST_HEAD(tmp);
@@ -712,22 +711,12 @@ xfs_trans_ail_update_bulk(
 	if (!list_empty(&tmp))
 		xfs_ail_splice(ailp, cur, &tmp, lsn);
+	spin_unlock(&ailp->xa_lock);
 
-	if (!mlip_changed) {
-		spin_unlock(&ailp->xa_lock);
-		return;
+	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+		xlog_assign_tail_lsn(ailp->xa_mount);
+		xfs_log_space_wake(ailp->xa_mount, false);
 	}
-
-	/*
-	 * It is not safe to access mlip after the AIL lock is dropped, so we
-	 * must get a copy of li_lsn before we do so. This is especially
-	 * important on 32-bit platforms where accessing and updating 64-bit
-	 * values like li_lsn is not atomic.
-	 */
-	mlip = xfs_ail_min(ailp);
-	tail_lsn = mlip->li_lsn;
-	spin_unlock(&ailp->xa_lock);
-	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
 /*
@@ -758,7 +747,6 @@ xfs_trans_ail_delete_bulk(
 	int			nr_items) __releases(ailp->xa_lock)
 {
 	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
 	int			mlip_changed = 0;
 	int			i;
 
@@ -785,23 +773,12 @@ xfs_trans_ail_delete_bulk(
 		if (mlip == lip)
 			mlip_changed = 1;
 	}
+	spin_unlock(&ailp->xa_lock);
 
-	if (!mlip_changed) {
-		spin_unlock(&ailp->xa_lock);
-		return;
+	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+		xlog_assign_tail_lsn(ailp->xa_mount);
+		xfs_log_space_wake(ailp->xa_mount, false);
 	}
-
-	/*
-	 * It is not safe to access mlip after the AIL lock is dropped, so we
-	 * must get a copy of li_lsn before we do so. This is especially
-	 * important on 32-bit platforms where accessing and updating 64-bit
-	 * values like li_lsn is not atomic. It is possible we've emptied the
-	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
-	 */
-	mlip = xfs_ail_min(ailp);
-	tail_lsn = mlip ? mlip->li_lsn : 0;
-	spin_unlock(&ailp->xa_lock);
-	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
 /*
--
cgit v1.2.3-70-g09d2
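
The lock-held variant mentioned in the commit message could look roughly
like the sketch below.  This is an illustration only: the series does not
add such a function, the name xlog_assign_tail_lsn_locked is made up here,
and calling xfs_ail_min() with the AIL lock held from the log code is an
assumption about how the layering would be cut.

/* Hypothetical sketch; not part of this series. */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct log		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	/* The caller already holds the AIL lock, so no extra roundtrip. */
	assert_spin_locked(&mp->m_ail->xa_lock);

	/*
	 * Same policy as xlog_assign_tail_lsn: use the LSN of the AIL
	 * minimum item, or fall back to the last sync LSN if the AIL
	 * is empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}

The two bulk callers above could then perform the assignment before
dropping xa_lock and avoid the extra AIL lock roundtrip the commit
message mentions.
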
From 5b03ff1b2444ddf7b8084b7505101e97257aff5a Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 20 Feb 2012 02:31:22 +0000
Subject: xfs: remove xfs_trans_unlocked_item

There is no reason to wake up log space waiters when unlocking inodes
or dquots, and the commit log has no explanation for this function
either.  Given that we now have exact log space wakeups everywhere, we
can assume the reason for this function was to paper over log space
races in earlier XFS versions.

Reviewed-by: Mark Tinguely
Signed-off-by: Christoph Hellwig
Reviewed-by: Dave Chinner
Signed-off-by: Ben Myers
---
 fs/xfs/xfs_dquot.c      | 11 -----------
 fs/xfs/xfs_dquot.h      |  3 +--
 fs/xfs/xfs_iget.c       | 13 +------------
 fs/xfs/xfs_inode.h      |  4 +---
 fs/xfs/xfs_inode_item.c |  6 +-----
 fs/xfs/xfs_trans_ail.c  | 44 --------------------------------------------
 fs/xfs/xfs_trans_buf.c  | 25 +------------------------
 fs/xfs/xfs_trans_priv.h |  3 ---
 8 files changed, 5 insertions(+), 104 deletions(-)

(limited to 'fs/xfs/xfs_trans_ail.c')

diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 4e268edcf3f..71e615fef17 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -1076,17 +1076,6 @@ xfs_qm_dqflush(
 
 }
 
-void
-xfs_dqunlock(
-	xfs_dquot_t *dqp)
-{
-	xfs_dqunlock_nonotify(dqp);
-	if (dqp->q_logitem.qli_dquot == dqp) {
-		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
-					&dqp->q_logitem.qli_item);
-	}
-}
-
 /*
  * Lock two xfs_dquot structures.
  *
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 48a795b141b..60b0d72b024 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -110,7 +110,7 @@ static inline void xfs_dqlock(struct xfs_dquot *dqp)
 	mutex_lock(&dqp->q_qlock);
 }
 
-static inline void xfs_dqunlock_nonotify(struct xfs_dquot *dqp)
+static inline void xfs_dqunlock(struct xfs_dquot *dqp)
 {
 	mutex_unlock(&dqp->q_qlock);
 }
@@ -166,7 +166,6 @@ extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
 extern void		xfs_qm_dqput(xfs_dquot_t *);
 
 extern void		xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
-extern void		xfs_dqunlock(struct xfs_dquot *);
 extern void		xfs_dqflock_pushbuf_wait(struct xfs_dquot *dqp);
 
 static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 8c3e46394d4..19dcfb2aac9 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -642,8 +642,7 @@ xfs_iunlock(
 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
-			XFS_LOCK_DEP_MASK)) == 0);
+	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
 	ASSERT(lock_flags != 0);
 
 	if (lock_flags & XFS_IOLOCK_EXCL)
@@ -656,16 +655,6 @@ xfs_iunlock(
 	else if (lock_flags & XFS_ILOCK_SHARED)
 		mrunlock_shared(&ip->i_lock);
 
-	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
-	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
-		/*
-		 * Let the AIL know that this item has been unlocked in case
-		 * it is in the AIL and anyone is waiting on it.  Don't do
-		 * this if the caller has asked us not to.
-		 */
-		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
-					(xfs_log_item_t*)(ip->i_itemp));
-	}
 	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
 }
 
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 2f27b745408..eda49378039 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -422,7 +422,6 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
 #define	XFS_IOLOCK_SHARED	(1<<1)
 #define	XFS_ILOCK_EXCL		(1<<2)
 #define	XFS_ILOCK_SHARED	(1<<3)
-#define	XFS_IUNLOCK_NONOTIFY	(1<<4)
 
 #define XFS_LOCK_MASK		(XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
 				| XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)
@@ -431,8 +430,7 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
 	{ XFS_IOLOCK_EXCL,	"IOLOCK_EXCL" }, \
 	{ XFS_IOLOCK_SHARED,	"IOLOCK_SHARED" }, \
 	{ XFS_ILOCK_EXCL,	"ILOCK_EXCL" }, \
-	{ XFS_ILOCK_SHARED,	"ILOCK_SHARED" }, \
-	{ XFS_IUNLOCK_NONOTIFY,	"IUNLOCK_NONOTIFY" }
+	{ XFS_ILOCK_SHARED,	"ILOCK_SHARED" }
 
 /*
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 91d71dcd485..adc8a261b5d 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -596,11 +596,7 @@ xfs_inode_item_trylock(
 	/* Stale items should force out the iclog */
 	if (ip->i_flags & XFS_ISTALE) {
 		xfs_ifunlock(ip);
-		/*
-		 * we hold the AIL lock - notify the unlock routine of this
-		 * so it doesn't try to get the lock again.
-		 */
-		xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 		return XFS_ITEM_PINNED;
 	}
 
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index c9234956bcb..9d5fc089ea2 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -610,50 +610,6 @@ xfs_ail_push_all(
 	xfs_ail_push(ailp, threshold_lsn);
 }
 
-/*
- * This is to be called when an item is unlocked that may have
- * been in the AIL.  It will wake up the first member of the AIL
- * wait list if this item's unlocking might allow it to progress.
- * If the item is in the AIL, then we need to get the AIL lock
- * while doing our checking so we don't race with someone going
- * to sleep waiting for this event in xfs_trans_push_ail().
- */
-void
-xfs_trans_unlocked_item(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-{
-	xfs_log_item_t	*min_lip;
-
-	/*
-	 * If we're forcibly shutting down, we may have
-	 * unlocked log items arbitrarily. The last thing
-	 * we want to do is to move the tail of the log
-	 * over some potentially valid data.
-	 */
-	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
-	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-		return;
-	}
-
-	/*
-	 * This is the one case where we can call into xfs_ail_min()
-	 * without holding the AIL lock because we only care about the
-	 * case where we are at the tail of the AIL. If the object isn't
-	 * at the tail, it doesn't matter what result we get back. This
-	 * is slightly racy because since we were just unlocked, we could
-	 * go to sleep between the call to xfs_ail_min and the call to
-	 * xfs_log_space_wake, have someone else lock us, commit to us disk,
-	 * move us out of the tail of the AIL, and then we wake up. However,
-	 * the call to xfs_log_space_wake() doesn't do anything if there's
-	 * not enough free space to wake people up so we're safe calling it.
-	 */
-	min_lip = xfs_ail_min(ailp);
-
-	if (min_lip == lip)
-		xfs_log_space_wake(ailp->xa_mount, true);
-}	/* xfs_trans_unlocked_item */
-
 /*
  * xfs_trans_ail_update - bulk AIL insertion operation.
  *
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 475a4ded4f4..1302d1d95a5 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -463,19 +463,7 @@ xfs_trans_brelse(xfs_trans_t	*tp,
 	 * Default to a normal brelse() call if the tp is NULL.
 	 */
 	if (tp == NULL) {
-		struct xfs_log_item *lip = bp->b_fspriv;
-
 		ASSERT(bp->b_transp == NULL);
-
-		/*
-		 * If there's a buf log item attached to the buffer,
-		 * then let the AIL know that the buffer is being
-		 * unlocked.
-		 */
-		if (lip != NULL && lip->li_type == XFS_LI_BUF) {
-			bip = bp->b_fspriv;
-			xfs_trans_unlocked_item(bip->bli_item.li_ailp, lip);
-		}
 		xfs_buf_relse(bp);
 		return;
 	}
@@ -550,21 +538,10 @@ xfs_trans_brelse(xfs_trans_t	*tp,
 	ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
 	ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
 	xfs_buf_item_relse(bp);
-	bip = NULL;
-	}
-	bp->b_transp = NULL;
-
-	/*
-	 * If we've still got a buf log item on the buffer, then
-	 * tell the AIL that the buffer is being unlocked.
-	 */
-	if (bip != NULL) {
-		xfs_trans_unlocked_item(bip->bli_item.li_ailp,
-					(xfs_log_item_t*)bip);
 	}
 
+	bp->b_transp = NULL;
 	xfs_buf_relse(bp);
-	return;
 }
 
 /*
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 44820b9fcb4..8ab2ced415f 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -104,9 +104,6 @@ void			xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
 void			xfs_ail_push_all(struct xfs_ail *);
 xfs_lsn_t		xfs_ail_min_lsn(struct xfs_ail *ailp);
 
-void			xfs_trans_unlocked_item(struct xfs_ail *,
-					xfs_log_item_t *);
-
 struct xfs_log_item *	xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
 					struct xfs_ail_cursor *cur,
 					xfs_lsn_t lsn);
--
cgit v1.2.3-70-g09d2
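
For reference, the net effect of the dquot hunks above: with the
out-of-line xfs_dqunlock wrapper gone, locking and unlocking a dquot is
again a symmetric pair of plain mutex calls.  The final state, as
reproduced from the hunks above, reads:

static inline void xfs_dqlock(struct xfs_dquot *dqp)
{
	mutex_lock(&dqp->q_qlock);
}

/* Formerly xfs_dqunlock_nonotify; the AIL notification wrapper is gone. */
static inline void xfs_dqunlock(struct xfs_dquot *dqp)
{
	mutex_unlock(&dqp->q_qlock);
}

Inode unlocking likewise loses the XFS_IUNLOCK_NONOTIFY special case, so
the one caller that used it now passes just XFS_ILOCK_SHARED.
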
From cfb7cdca0aca5ee2e2ef491284bf1edc3b581885 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 20 Feb 2012 02:31:23 +0000
Subject: xfs: cleanup xfs_log_space_wake

Remove the now unused opportunistic parameter, and use the
xlog_writeq_wake and xlog_reserveq_wake helpers now that we don't have
to care about the opportunistic wakeups.

Reviewed-by: Mark Tinguely
Signed-off-by: Christoph Hellwig
Reviewed-by: Dave Chinner
Signed-off-by: Ben Myers
---
 fs/xfs/xfs_log.c       | 35 +++++------------------------------
 fs/xfs/xfs_log.h       |  3 +--
 fs/xfs/xfs_trans_ail.c |  4 ++--
 3 files changed, 8 insertions(+), 34 deletions(-)

(limited to 'fs/xfs/xfs_trans_ail.c')

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9161e8a76e7..2db39df5a57 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -762,18 +762,13 @@ xfs_log_item_init(
 
 /*
  * Wake up processes waiting for log space after we have moved the log tail.
- *
- * If opportunistic is set wake up one waiter even if we do not have enough
- * free space by our strict accounting.
  */
 void
 xfs_log_space_wake(
-	struct xfs_mount	*mp,
-	bool			opportunistic)
+	struct xfs_mount	*mp)
 {
-	struct xlog_ticket	*tic;
 	struct log		*log = mp->m_log;
-	int			need_bytes, free_bytes;
+	int			free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
@@ -783,16 +778,7 @@ xfs_log_space_wake(
 
 		spin_lock(&log->l_grant_write_lock);
 		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-		list_for_each_entry(tic, &log->l_writeq, t_queue) {
-			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
-
-			if (free_bytes < tic->t_unit_res && !opportunistic)
-				break;
-			opportunistic = false;
-			free_bytes -= tic->t_unit_res;
-			trace_xfs_log_regrant_write_wake_up(log, tic);
-			wake_up(&tic->t_wait);
-		}
+		xlog_writeq_wake(log, &free_bytes);
 		spin_unlock(&log->l_grant_write_lock);
 	}
 
@@ -801,18 +787,7 @@ xfs_log_space_wake(
 
 		spin_lock(&log->l_grant_reserve_lock);
 		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
-		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
-			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
-				need_bytes = tic->t_unit_res*tic->t_cnt;
-			else
-				need_bytes = tic->t_unit_res;
-			if (free_bytes < need_bytes && !opportunistic)
-				break;
-			opportunistic = false;
-			free_bytes -= need_bytes;
-			trace_xfs_log_grant_wake_up(log, tic);
-			wake_up(&tic->t_wait);
-		}
+		xlog_reserveq_wake(log, &free_bytes);
 		spin_unlock(&log->l_grant_reserve_lock);
 	}
 }
@@ -2748,7 +2723,7 @@ xlog_ungrant_log_space(xlog_t	*log,
 
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xfs_log_space_wake(log->l_mp, false);
+	xfs_log_space_wake(log->l_mp);
 }
 
 /*
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 58d858074e6..fe32c692787 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -161,8 +161,7 @@ int	  xfs_log_mount(struct xfs_mount	*mp,
 			int		 num_bblocks);
 int	  xfs_log_mount_finish(struct xfs_mount *mp);
 xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
-void	  xfs_log_space_wake(struct xfs_mount *mp,
-			     bool opportunistic);
+void	  xfs_log_space_wake(struct xfs_mount *mp);
 int	  xfs_log_notify(struct xfs_mount	*mp,
 			 struct xlog_in_core	*iclog,
 			 xfs_log_callback_t	*callback_entry);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 9d5fc089ea2..1dead07f092 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -671,7 +671,7 @@ xfs_trans_ail_update_bulk(
 
 	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
 		xlog_assign_tail_lsn(ailp->xa_mount);
-		xfs_log_space_wake(ailp->xa_mount, false);
+		xfs_log_space_wake(ailp->xa_mount);
 	}
 }
 
@@ -733,7 +733,7 @@ xfs_trans_ail_delete_bulk(
 
 	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
 		xlog_assign_tail_lsn(ailp->xa_mount);
-		xfs_log_space_wake(ailp->xa_mount, false);
+		xfs_log_space_wake(ailp->xa_mount);
 	}
 }
 
--
cgit v1.2.3-70-g09d2
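
The xlog_writeq_wake and xlog_reserveq_wake helpers themselves are not
shown in this series.  Judging from the open-coded loop removed above,
xlog_reserveq_wake presumably looks roughly like the sketch below; the
bool return value and the free_bytes out-parameter are assumptions
based on how the removed code updated free_bytes as it walked the
queue.

/*
 * Sketch only -- reconstructed from the removed loop in
 * xfs_log_space_wake(), not copied from the actual helper.
 * Wake waiters on the reserve queue as long as strict accounting says
 * there is space for them; stop at the first one that no longer fits.
 * Called with l_grant_reserve_lock held.
 */
STATIC bool
xlog_reserveq_wake(
	struct log		*log,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;

	list_for_each_entry(tic, &log->l_reserveq, t_queue) {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			need_bytes = tic->t_unit_res * tic->t_cnt;
		else
			need_bytes = tic->t_unit_res;

		if (*free_bytes < need_bytes)
			return false;
		*free_bytes -= need_bytes;

		trace_xfs_log_grant_wake_up(log, tic);
		wake_up(&tic->t_wait);
	}
	return true;
}

xlog_writeq_wake would be the analogous walk over l_writeq, where every
ticket is asserted to be a permanent reservation and need_bytes is
therefore always t_unit_res.  Without the opportunistic flag there is
no "wake one waiter anyway" escape hatch in either loop, which is
exactly why the parameter could be dropped.
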