@@ -123,7 +123,6 @@ __xfs_inode_free(
 {
 	/* asserts to verify all state is correct here */
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!xfs_isiflocked(ip));
 	XFS_STATS_DEC(ip->i_mount, vn_active);
 
 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
@@ -133,6 +132,8 @@ void
 xfs_inode_free(
 	struct xfs_inode	*ip)
 {
+	ASSERT(!xfs_isiflocked(ip));
+
 	/*
 	 * Because we use RCU freeing we need to ensure the inode always
 	 * appears to be reclaimed with an invalid inode number when in the
@@ -981,6 +982,7 @@ restart:
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 		xfs_iunpin_wait(ip);
+		/* xfs_iflush_abort() drops the flush lock */
 		xfs_iflush_abort(ip, false);
 		goto reclaim;
 	}
@@ -989,10 +991,10 @@ restart:
 			goto out_ifunlock;
 		xfs_iunpin_wait(ip);
 	}
-	if (xfs_iflags_test(ip, XFS_ISTALE))
-		goto reclaim;
-	if (xfs_inode_clean(ip))
+	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
+		xfs_ifunlock(ip);
 		goto reclaim;
+	}
 
 	/*
 	 * Never flush out dirty data during non-blocking reclaim, as it would
@@ -1030,25 +1032,24 @@ restart:
 		xfs_buf_relse(bp);
 	}
 
-	xfs_iflock(ip);
 reclaim:
+	ASSERT(!xfs_isiflocked(ip));
+
 	/*
 	 * Because we use RCU freeing we need to ensure the inode always appears
 	 * to be reclaimed with an invalid inode number when in the free state.
-	 * We do this as early as possible under the ILOCK and flush lock so
-	 * that xfs_iflush_cluster() can be guaranteed to detect races with us
-	 * here. By doing this, we guarantee that once xfs_iflush_cluster has
-	 * locked both the XFS_ILOCK and the flush lock that it will see either
-	 * a valid, flushable inode that will serialise correctly against the
-	 * locks below, or it will see a clean (and invalid) inode that it can
-	 * skip.
+	 * We do this as early as possible under the ILOCK so that
+	 * xfs_iflush_cluster() can be guaranteed to detect races with us here.
+	 * By doing this, we guarantee that once xfs_iflush_cluster has locked
+	 * XFS_ILOCK that it will see either a valid, flushable inode that will
+	 * serialise correctly, or it will see a clean (and invalid) inode that
+	 * it can skip.
 	 */
 	spin_lock(&ip->i_flags_lock);
 	ip->i_flags = XFS_IRECLAIM;
 	ip->i_ino = 0;
 	spin_unlock(&ip->i_flags_lock);
 
-	xfs_ifunlock(ip);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
 	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
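
Note: the race-detection scheme described in the comment above depends on the
reader side revalidating the inode number under i_flags_lock before trying to
flush. Below is a minimal sketch of that check; the helper name is
hypothetical and not part of this patch (in-tree, the equivalent test is
open-coded in xfs_iflush_cluster()):

/*
 * Hypothetical helper sketching the reader-side revalidation: once reclaim
 * has set i_flags = XFS_IRECLAIM and i_ino = 0 under i_flags_lock, any
 * walker that takes i_flags_lock and rechecks the inode number will see the
 * inode as invalid and skip it.
 */
static bool
xfs_inode_reclaim_raced(
	struct xfs_inode	*ip,
	xfs_ino_t		ino)
{
	bool			raced;

	spin_lock(&ip->i_flags_lock);
	raced = (ip->i_ino != ino ||
		 __xfs_iflags_test(ip, XFS_IRECLAIM));
	spin_unlock(&ip->i_flags_lock);
	return raced;
}

Because reclaim publishes XFS_IRECLAIM and zeroes i_ino while still holding
the ILOCK, a walker performing this check observes either a valid, flushable
inode or an invalidated one, never an intermediate state.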