@@ -37,8 +37,6 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
-STATIC void xfs_inode_clear_reclaim_tag(struct xfs_perag *pag, xfs_ino_t ino);
-
 /*
  * Allocate and initialise an xfs_inode.
  */
@@ -143,6 +141,122 @@ xfs_inode_free(
 	__xfs_inode_free(ip);
 }
 
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs periodic sync default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_reclaim_work_queue(
+	struct xfs_mount	*mp)
+{
+
+	rcu_read_lock();
+	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
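+		/*
+		 * xfs_syncd_centisecs defaults to 3000 (30s); dividing by
+		 * 6 and multiplying by 10 converts centiseconds to
+		 * milliseconds, giving the 5s default delay noted above.
+		 */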
+		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
+			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
+	}
+	rcu_read_unlock();
+}
+
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+void
+xfs_reclaim_worker(
+	struct work_struct	*work)
+{
+	struct xfs_mount	*mp = container_of(to_delayed_work(work),
+					struct xfs_mount, m_reclaim_work);
+
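+	/* SYNC_TRYLOCK: skip locked or already-flushing inodes, per above */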
+	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+	xfs_reclaim_work_queue(mp);
+}
+
+static void
+xfs_perag_set_reclaim_tag(
+	struct xfs_perag	*pag)
+{
+	struct xfs_mount	*mp = pag->pag_mount;
+
+	ASSERT(spin_is_locked(&pag->pag_ici_lock));
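+	/* nothing to do if this is not the AG's first reclaimable inode */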
+	if (pag->pag_ici_reclaimable++)
+		return;
+
+	/* propagate the reclaim tag up into the perag radix tree */
+	spin_lock(&mp->m_perag_lock);
+	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
+			   XFS_ICI_RECLAIM_TAG);
+	spin_unlock(&mp->m_perag_lock);
+
+	/* schedule periodic background inode reclaim */
+	xfs_reclaim_work_queue(mp);
+
+	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+}
+
+static void
+xfs_perag_clear_reclaim_tag(
+	struct xfs_perag	*pag)
+{
+	struct xfs_mount	*mp = pag->pag_mount;
+
+	ASSERT(spin_is_locked(&pag->pag_ici_lock));
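+	/* nothing to do while the AG still holds reclaimable inodes */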
+	if (--pag->pag_ici_reclaimable)
+		return;
+
+	/* clear the reclaim tag from the perag radix tree */
+	spin_lock(&mp->m_perag_lock);
+	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
+			     XFS_ICI_RECLAIM_TAG);
+	spin_unlock(&mp->m_perag_lock);
+	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+}
+
+
+/*
+ * We set the inode flag atomically with the radix tree tag.
+ * Once we get tag lookups on the radix tree, this inode flag
+ * can go away.
+ */
+void
+xfs_inode_set_reclaim_tag(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_perag	*pag;
+
+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+	spin_lock(&pag->pag_ici_lock);
+	spin_lock(&ip->i_flags_lock);
+
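+	/*
+	 * Holding both locks keeps the radix tree tag and the
+	 * XFS_IRECLAIMABLE flag below in sync, as the comment above
+	 * describes.
+	 */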
+	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
+			   XFS_ICI_RECLAIM_TAG);
+	xfs_perag_set_reclaim_tag(pag);
+	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+
+	spin_unlock(&ip->i_flags_lock);
+	spin_unlock(&pag->pag_ici_lock);
+	xfs_perag_put(pag);
+}
+
+STATIC void
+xfs_inode_clear_reclaim_tag(
+	struct xfs_perag	*pag,
+	xfs_ino_t		ino)
+{
+	radix_tree_tag_clear(&pag->pag_ici_root,
+			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
+			     XFS_ICI_RECLAIM_TAG);
+	xfs_perag_clear_reclaim_tag(pag);
+}
+
 /*
  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
  * part of the structure. This is made more complex by the fact we store
@@ -728,122 +842,6 @@ xfs_inode_ag_iterator_tag(
 	return last_error;
 }
 
-/*
- * Queue a new inode reclaim pass if there are reclaimable inodes and there
- * isn't a reclaim pass already in progress. By default it runs every 5s based
- * on the xfs periodic sync default of 30s. Perhaps this should have it's own
- * tunable, but that can be done if this method proves to be ineffective or too
- * aggressive.
- */
-static void
-xfs_reclaim_work_queue(
-	struct xfs_mount	*mp)
-{
-
-	rcu_read_lock();
-	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
-		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
-			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
-	}
-	rcu_read_unlock();
-}
-
-/*
- * This is a fast pass over the inode cache to try to get reclaim moving on as
- * many inodes as possible in a short period of time. It kicks itself every few
- * seconds, as well as being kicked by the inode cache shrinker when memory
- * goes low. It scans as quickly as possible avoiding locked inodes or those
- * already being flushed, and once done schedules a future pass.
- */
-void
-xfs_reclaim_worker(
-	struct work_struct	*work)
-{
-	struct xfs_mount	*mp = container_of(to_delayed_work(work),
-					struct xfs_mount, m_reclaim_work);
-
-	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
-	xfs_reclaim_work_queue(mp);
-}
-
-static void
-xfs_perag_set_reclaim_tag(
-	struct xfs_perag	*pag)
-{
-	struct xfs_mount	*mp = pag->pag_mount;
-
-	ASSERT(spin_is_locked(&pag->pag_ici_lock));
-	if (pag->pag_ici_reclaimable++)
-		return;
-
-	/* propagate the reclaim tag up into the perag radix tree */
-	spin_lock(&mp->m_perag_lock);
-	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
-			   XFS_ICI_RECLAIM_TAG);
-	spin_unlock(&mp->m_perag_lock);
-
-	/* schedule periodic background inode reclaim */
-	xfs_reclaim_work_queue(mp);
-
-	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
-}
-
-static void
-xfs_perag_clear_reclaim_tag(
-	struct xfs_perag	*pag)
-{
-	struct xfs_mount	*mp = pag->pag_mount;
-
-	ASSERT(spin_is_locked(&pag->pag_ici_lock));
-	if (--pag->pag_ici_reclaimable)
-		return;
-
-	/* clear the reclaim tag from the perag radix tree */
-	spin_lock(&mp->m_perag_lock);
-	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
-			     XFS_ICI_RECLAIM_TAG);
-	spin_unlock(&mp->m_perag_lock);
-	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
-}
-
-
-/*
- * We set the inode flag atomically with the radix tree tag.
- * Once we get tag lookups on the radix tree, this inode flag
- * can go away.
- */
-void
-xfs_inode_set_reclaim_tag(
-	struct xfs_inode	*ip)
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_perag	*pag;
-
-	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
-	spin_lock(&pag->pag_ici_lock);
-	spin_lock(&ip->i_flags_lock);
-
-	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
-			   XFS_ICI_RECLAIM_TAG);
-	xfs_perag_set_reclaim_tag(pag);
-	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
-
-	spin_unlock(&ip->i_flags_lock);
-	spin_unlock(&pag->pag_ici_lock);
-	xfs_perag_put(pag);
-}
-
-STATIC void
-xfs_inode_clear_reclaim_tag(
-	struct xfs_perag	*pag,
-	xfs_ino_t		ino)
-{
-	radix_tree_tag_clear(&pag->pag_ici_root,
-			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
-			     XFS_ICI_RECLAIM_TAG);
-	xfs_perag_clear_reclaim_tag(pag);
-}
-
 /*
  * Grab the inode for reclaim exclusively.
  * Return 0 if we grabbed it, non-zero otherwise.