Diffstat (limited to 'fs/xfs/xfs_inode.c')
-rw-r--r-- | fs/xfs/xfs_inode.c | 64
1 files changed, 27 insertions, 37 deletions
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c27d7d495aa..d72c80dbfbb 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2193,7 +2193,7 @@ xfs_ifree_cluster(
 		/* Inode not in memory or we found it already,
 		 * nothing to do
 		 */
-		if (!ip || (ip->i_flags & XFS_ISTALE)) {
+		if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
 			read_unlock(&ih->ih_lock);
 			continue;
 		}
@@ -2215,10 +2215,7 @@ xfs_ifree_cluster(
 
 		if (ip == free_ip) {
 			if (xfs_iflock_nowait(ip)) {
-				spin_lock(&ip->i_flags_lock);
-				ip->i_flags |= XFS_ISTALE;
-				spin_unlock(&ip->i_flags_lock);
-
+				xfs_iflags_set(ip, XFS_ISTALE);
 				if (xfs_inode_clean(ip)) {
 					xfs_ifunlock(ip);
 				} else {
@@ -2231,9 +2228,7 @@ xfs_ifree_cluster(
 
 		if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 			if (xfs_iflock_nowait(ip)) {
-				spin_lock(&ip->i_flags_lock);
-				ip->i_flags |= XFS_ISTALE;
-				spin_unlock(&ip->i_flags_lock);
+				xfs_iflags_set(ip, XFS_ISTALE);
 
 				if (xfs_inode_clean(ip)) {
 					xfs_ifunlock(ip);
@@ -2263,9 +2258,7 @@ xfs_ifree_cluster(
 				AIL_LOCK(mp,s);
 				iip->ili_flush_lsn = iip->ili_item.li_lsn;
 				AIL_UNLOCK(mp, s);
-				spin_lock(&iip->ili_inode->i_flags_lock);
-				iip->ili_inode->i_flags |= XFS_ISTALE;
-				spin_unlock(&iip->ili_inode->i_flags_lock);
+				xfs_iflags_set(ip, XFS_ISTALE);
 				pre_flushed++;
 			}
 			lip = lip->li_bio_list;
@@ -2748,42 +2741,39 @@ xfs_iunpin(
 {
 	ASSERT(atomic_read(&ip->i_pincount) > 0);
 
-	if (atomic_dec_and_test(&ip->i_pincount)) {
+	if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) {
+
 		/*
-		 * If the inode is currently being reclaimed, the
-		 * linux inode _and_ the xfs vnode may have been
-		 * freed so we cannot reference either of them safely.
-		 * Hence we should not try to do anything to them
-		 * if the xfs inode is currently in the reclaim
-		 * path.
+		 * If the inode is currently being reclaimed, the link between
+		 * the bhv_vnode and the xfs_inode will be broken after the
+		 * XFS_IRECLAIM* flag is set. Hence, if these flags are not
+		 * set, then we can move forward and mark the linux inode dirty
+		 * knowing that it is still valid as it won't freed until after
+		 * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The
+		 * i_flags_lock is used to synchronise the setting of the
+		 * XFS_IRECLAIM* flags and the breaking of the link, and so we
+		 * can execute atomically w.r.t to reclaim by holding this lock
+		 * here.
 		 *
-		 * However, we still need to issue the unpin wakeup
-		 * call as the inode reclaim may be blocked waiting for
-		 * the inode to become unpinned.
+		 * However, we still need to issue the unpin wakeup call as the
+		 * inode reclaim may be blocked waiting for the inode to become
+		 * unpinned.
 		 */
-		struct inode *inode = NULL;
 
-		spin_lock(&ip->i_flags_lock);
-		if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
+		if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) {
 			bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
+			struct inode *inode = NULL;
+
+			BUG_ON(vp == NULL);
+			inode = vn_to_inode(vp);
+			BUG_ON(inode->i_state & I_CLEAR);
 
 			/* make sync come back and flush this inode */
-			if (vp) {
-				inode = vn_to_inode(vp);
-
-				if (!(inode->i_state &
-						(I_NEW|I_FREEING|I_CLEAR))) {
-					inode = igrab(inode);
-					if (inode)
-						mark_inode_dirty_sync(inode);
-				} else
-					inode = NULL;
-			}
+			if (!(inode->i_state & (I_NEW|I_FREEING)))
+				mark_inode_dirty_sync(inode);
 		}
 		spin_unlock(&ip->i_flags_lock);
 		wake_up(&ip->i_ipin_wait);
-		if (inode)
-			iput(inode);
 	}
 }
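
For context, the patch replaces open-coded manipulation of ip->i_flags under i_flags_lock with the xfs_iflags_* wrappers, which are defined in fs/xfs/xfs_inode.h and are not part of this diff. A minimal sketch of their likely shape at the time is shown below (the locked variants take i_flags_lock themselves; the double-underscore variants assume the caller already holds it):

/* Sketch only -- these helpers live in fs/xfs/xfs_inode.h, not in this patch. */
static inline void
__xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
{
	/* caller must hold ip->i_flags_lock */
	ip->i_flags |= flags;
}

static inline void
xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
{
	spin_lock(&ip->i_flags_lock);
	__xfs_iflags_set(ip, flags);
	spin_unlock(&ip->i_flags_lock);
}

static inline int
__xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
{
	/* caller must hold ip->i_flags_lock */
	return (ip->i_flags & flags);
}

static inline int
xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
{
	int ret;

	spin_lock(&ip->i_flags_lock);
	ret = __xfs_iflags_test(ip, flags);
	spin_unlock(&ip->i_flags_lock);
	return ret;
}

The xfs_iunpin() hunk relies on the semantics of atomic_dec_and_lock(): it decrements i_pincount and returns true with i_flags_lock held only when the count reaches zero. That is why the unlocked __xfs_iflags_test() variant is safe there, and why the XFS_IRECLAIM* check and mark_inode_dirty_sync() run atomically with respect to xfs_reclaim() breaking the bhv_vnode<->xfs_inode link, as the new comment in the diff explains.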