author	Dave Chinner <dchinner@redhat.com>	2016-05-18 14:20:08 +1000
committer	Dave Chinner <david@fromorbit.com>	2016-05-18 14:20:08 +1000
commit	ad438c4038968e5ca5248f851212634e474983e8 (patch)
tree	28d6b8ba7400608dc1a403187b0f8482f6aa706c /fs/xfs
parent	545c0889d26d47e1139c527002eb131343d13b63 (diff)
xfs: move reclaim tagging functions
Rearrange the inode tagging functions so that they are higher up in xfs_icache.c and so there is no need for forward prototypes to be defined. This is purely code movement, no other change.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_icache.c | 234
1 file changed, 116 insertions(+), 118 deletions(-)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 789f8c3..99ee6eee 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -37,8 +37,6 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
-STATIC void xfs_inode_clear_reclaim_tag(struct xfs_perag *pag, xfs_ino_t ino);
-
/*
* Allocate and initialise an xfs_inode.
*/
@@ -144,6 +142,122 @@ xfs_inode_free(
}
/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs periodic sync default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_reclaim_work_queue(
+ struct xfs_mount *mp)
+{
+
+ rcu_read_lock();
+ if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
+ msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
+ }
+ rcu_read_unlock();
+}
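
For reference, the "every 5s" figure in the comment falls straight out of the conversion in the queue_delayed_work() call above: xfs_syncd_centisecs defaults to 30 * 100 centiseconds (the 30s periodic sync default), and "/ 6 * 10" both divides that period by six and converts centiseconds to milliseconds. A minimal user-space sketch of the arithmetic, assuming the 30s default:

#include <stdio.h>

int main(void)
{
	/* xfs periodic sync default: 30s, stored in centiseconds */
	unsigned int xfs_syncd_centisecs = 30 * 100;

	/* "/ 6 * 10" divides the period by 6 and scales centisecs to msecs */
	unsigned int interval_ms = xfs_syncd_centisecs / 6 * 10;

	printf("reclaim pass every %u ms\n", interval_ms); /* prints 5000 */
	return 0;
}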
+
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+void
+xfs_reclaim_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_reclaim_work);
+
+ xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+ xfs_reclaim_work_queue(mp);
+}
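
The container_of(to_delayed_work(work), ...) step is the usual kernel idiom for recovering the enclosing structure from a pointer to one of its members. A self-contained user-space sketch of the same pointer arithmetic; the struct definitions here are illustrative stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

/* User-space re-creation of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct delayed_work { int pending; };

struct mount {
	int id;
	struct delayed_work reclaim_work;
};

int main(void)
{
	struct mount m = { .id = 42 };
	struct delayed_work *dw = &m.reclaim_work;

	/* step back from the member to the structure that embeds it */
	struct mount *back = container_of(dw, struct mount, reclaim_work);

	printf("recovered mount id: %d\n", back->id); /* 42 */
	return 0;
}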
+
+static void
+xfs_perag_set_reclaim_tag(
+ struct xfs_perag *pag)
+{
+ struct xfs_mount *mp = pag->pag_mount;
+
+ ASSERT(spin_is_locked(&pag->pag_ici_lock));
+ if (pag->pag_ici_reclaimable++)
+ return;
+
+ /* propagate the reclaim tag up into the perag radix tree */
+ spin_lock(&mp->m_perag_lock);
+ radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
+ XFS_ICI_RECLAIM_TAG);
+ spin_unlock(&mp->m_perag_lock);
+
+ /* schedule periodic background inode reclaim */
+ xfs_reclaim_work_queue(mp);
+
+ trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+}
+
+static void
+xfs_perag_clear_reclaim_tag(
+ struct xfs_perag *pag)
+{
+ struct xfs_mount *mp = pag->pag_mount;
+
+ ASSERT(spin_is_locked(&pag->pag_ici_lock));
+ if (--pag->pag_ici_reclaimable)
+ return;
+
+ /* clear the reclaim tag from the perag radix tree */
+ spin_lock(&mp->m_perag_lock);
+ radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
+ XFS_ICI_RECLAIM_TAG);
+ spin_unlock(&mp->m_perag_lock);
+ trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+}
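
The two functions above share a refcount-style pattern: only the 0 -> 1 transition of pag_ici_reclaimable propagates the reclaim tag up into the perag tree, and only the 1 -> 0 transition clears it, so the mount-wide tag cheaply tracks "this AG has at least one reclaimable inode". A stripped-down user-space sketch of that pattern; tag_set()/tag_clear() are hypothetical stand-ins for the radix tree tag calls:

#include <stdbool.h>
#include <stdio.h>

static int reclaimable;		/* plays the role of pag_ici_reclaimable */
static bool parent_tag;		/* plays the role of the perag tree tag */

static void tag_set(void)   { parent_tag = true; }
static void tag_clear(void) { parent_tag = false; }

static void set_reclaim(void)
{
	if (reclaimable++)	/* tag already set by an earlier inode */
		return;
	tag_set();		/* 0 -> 1: propagate up */
}

static void clear_reclaim(void)
{
	if (--reclaimable)	/* other reclaimable inodes remain */
		return;
	tag_clear();		/* 1 -> 0: last one out clears the tag */
}

int main(void)
{
	set_reclaim();
	set_reclaim();
	clear_reclaim();
	printf("after one clear: %d\n", parent_tag);	/* still 1 */
	clear_reclaim();
	printf("after both clear: %d\n", parent_tag);	/* now 0 */
	return 0;
}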
+
+
+/*
+ * We set the inode flag atomically with the radix tree tag.
+ * Once we get tag lookups on the radix tree, this inode flag
+ * can go away.
+ */
+void
+xfs_inode_set_reclaim_tag(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+ spin_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+
+ radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
+ xfs_perag_set_reclaim_tag(pag);
+ __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+
+ spin_unlock(&ip->i_flags_lock);
+ spin_unlock(&pag->pag_ici_lock);
+ xfs_perag_put(pag);
+}
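
Note the lock ordering here: pag_ici_lock (guarding the per-AG inode radix tree) is taken before i_flags_lock (guarding the inode flags), and both the tree tag and XFS_IRECLAIMABLE are updated with both held. That is what the comment means by setting the flag atomically with the tag: no observer taking the same locks can see the two disagree. A minimal pthread sketch of the both-or-neither update, with illustrative names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t ici_lock = PTHREAD_MUTEX_INITIALIZER;
static bool tree_tag;	/* stands in for the radix tree reclaim tag */
static bool inode_flag;	/* stands in for XFS_IRECLAIMABLE */

/* Update both under one lock so a reader that takes the same lock
 * always sees the tag and the flag agree. */
static void set_reclaimable(void)
{
	pthread_mutex_lock(&ici_lock);
	tree_tag = true;
	inode_flag = true;
	pthread_mutex_unlock(&ici_lock);
}

int main(void)
{
	set_reclaimable();
	return (tree_tag && inode_flag) ? 0 : 1;
}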
+
+STATIC void
+xfs_inode_clear_reclaim_tag(
+ struct xfs_perag *pag,
+ xfs_ino_t ino)
+{
+ radix_tree_tag_clear(&pag->pag_ici_root,
+ XFS_INO_TO_AGINO(pag->pag_mount, ino),
+ XFS_ICI_RECLAIM_TAG);
+ xfs_perag_clear_reclaim_tag(pag);
+}
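
Both tagging paths lean on XFS_INO_TO_AGNO()/XFS_INO_TO_AGINO(), which split a global inode number into an allocation group number and an AG-relative inode number; the real macros take the shift width from the filesystem geometry in the mount. A hedged sketch using a hypothetical fixed width:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define AGINO_BITS 32	/* hypothetical width; XFS derives it from geometry */

static uint64_t ino_to_agno(uint64_t ino)
{
	return ino >> AGINO_BITS;	/* high bits: allocation group */
}

static uint64_t ino_to_agino(uint64_t ino)
{
	return ino & ((1ULL << AGINO_BITS) - 1);	/* low bits: AG inode */
}

int main(void)
{
	uint64_t ino = (3ULL << AGINO_BITS) | 1234;	/* agino 1234 in AG 3 */

	printf("agno=%" PRIu64 " agino=%" PRIu64 "\n",
	       ino_to_agno(ino), ino_to_agino(ino));
	return 0;
}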
+
+/*
* When we recycle a reclaimable inode, we need to re-initialise the VFS inode
* part of the structure. This is made more complex by the fact we store
* information about the on-disk values in the VFS inode and so we can't just
@@ -729,122 +843,6 @@ xfs_inode_ag_iterator_tag(
}
/*
- * Queue a new inode reclaim pass if there are reclaimable inodes and there
- * isn't a reclaim pass already in progress. By default it runs every 5s based
- * on the xfs periodic sync default of 30s. Perhaps this should have its own
- * tunable, but that can be done if this method proves to be ineffective or too
- * aggressive.
- */
-static void
-xfs_reclaim_work_queue(
- struct xfs_mount *mp)
-{
-
- rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
- queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
- msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
- }
- rcu_read_unlock();
-}
-
-/*
- * This is a fast pass over the inode cache to try to get reclaim moving on as
- * many inodes as possible in a short period of time. It kicks itself every few
- * seconds, as well as being kicked by the inode cache shrinker when memory
- * goes low. It scans as quickly as possible avoiding locked inodes or those
- * already being flushed, and once done schedules a future pass.
- */
-void
-xfs_reclaim_worker(
- struct work_struct *work)
-{
- struct xfs_mount *mp = container_of(to_delayed_work(work),
- struct xfs_mount, m_reclaim_work);
-
- xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
- xfs_reclaim_work_queue(mp);
-}
-
-static void
-xfs_perag_set_reclaim_tag(
- struct xfs_perag *pag)
-{
- struct xfs_mount *mp = pag->pag_mount;
-
- ASSERT(spin_is_locked(&pag->pag_ici_lock));
- if (pag->pag_ici_reclaimable++)
- return;
-
- /* propagate the reclaim tag up into the perag radix tree */
- spin_lock(&mp->m_perag_lock);
- radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
- XFS_ICI_RECLAIM_TAG);
- spin_unlock(&mp->m_perag_lock);
-
- /* schedule periodic background inode reclaim */
- xfs_reclaim_work_queue(mp);
-
- trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
-}
-
-static void
-xfs_perag_clear_reclaim_tag(
- struct xfs_perag *pag)
-{
- struct xfs_mount *mp = pag->pag_mount;
-
- ASSERT(spin_is_locked(&pag->pag_ici_lock));
- if (--pag->pag_ici_reclaimable)
- return;
-
- /* clear the reclaim tag from the perag radix tree */
- spin_lock(&mp->m_perag_lock);
- radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
- XFS_ICI_RECLAIM_TAG);
- spin_unlock(&mp->m_perag_lock);
- trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
-}
-
-
-/*
- * We set the inode flag atomically with the radix tree tag.
- * Once we get tag lookups on the radix tree, this inode flag
- * can go away.
- */
-void
-xfs_inode_set_reclaim_tag(
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_perag *pag;
-
- pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
- spin_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
-
- radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
- XFS_ICI_RECLAIM_TAG);
- xfs_perag_set_reclaim_tag(pag);
- __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
-
- spin_unlock(&ip->i_flags_lock);
- spin_unlock(&pag->pag_ici_lock);
- xfs_perag_put(pag);
-}
-
-STATIC void
-xfs_inode_clear_reclaim_tag(
- struct xfs_perag *pag,
- xfs_ino_t ino)
-{
- radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(pag->pag_mount, ino),
- XFS_ICI_RECLAIM_TAG);
- xfs_perag_clear_reclaim_tag(pag);
-}
-
-/*
* Grab the inode for reclaim exclusively.
* Return 0 if we grabbed it, non-zero otherwise.
*/