author	Darrick J. Wong <darrick.wong@oracle.com>	2020-02-19 17:02:13 -0800
committer	Darrick J. Wong <darrick.wong@oracle.com>	2020-06-01 21:16:36 -0700
commit	64bb9ca7fc027d4a56ace91956d160e4721d6e76 (patch)
tree	2cc6ce10939d7ebe5e07cf829a2d5ab88e063cf9
parent	2ce81633b2cfb87e74558efe282ea0248a67daff (diff)
xfs: create a polled function to force inode inactivation (deferred-inactivation_2020-06-01)

Create a polled version of xfs_inactive_force so that we can force
inactivation while holding a lock (usually the umount lock) without
tripping over the softlockup timer. This is for callers that hold vfs
locks while calling inactivation, which is currently unmount, iunlink
processing during mount, and rw->ro remount.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
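The core of the change is a poll-with-watchdog loop: sleep on a waitqueue in
bounded slices and touch the softlockup watchdog whenever a slice times out,
so a caller that holds vfs locks can wait for a long-running flush without
triggering a lockup report. A minimal sketch of that pattern, separate from
the patch below, might look like this (the helper name and the pending()
callback are hypothetical and are not part of the patch):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>

/*
 * Illustrative sketch only (hypothetical names): wait until @pending
 * reports false, waking at least once per second to touch the softlockup
 * watchdog.  wait_event_timeout() returns 0 when the timeout elapses with
 * the condition still false, so the loop keeps polling in HZ-sized slices.
 */
static void example_poll_until_idle(struct wait_queue_head *wq,
				    bool (*pending)(void *data), void *data)
{
	while (!wait_event_timeout(*wq, !pending(data), HZ))
		touch_softlockup_watchdog();
}

xfs_inactive_force_poll() in the patch applies this pattern to the
m_inactive_wait waitqueue, with xfs_inactive_pending() as the condition.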
-rw-r--r--	fs/xfs/xfs_icache.c	47
-rw-r--r--	fs/xfs/xfs_icache.h	2
-rw-r--r--	fs/xfs/xfs_mount.c	2
-rw-r--r--	fs/xfs/xfs_mount.h	5
-rw-r--r--	fs/xfs/xfs_super.c	3
5 files changed, 56 insertions, 3 deletions
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 999b8eee02c2..640cbcbd5f21 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -25,6 +25,7 @@
#include "xfs_health.h"
#include <linux/iversion.h>
+#include <linux/nmi.h>
STATIC int xfs_inode_free_eofblocks(struct xfs_inode *ip, void *args);
STATIC int xfs_inode_free_cowblocks(struct xfs_inode *ip, void *args);
@@ -2246,8 +2247,12 @@ xfs_inactive_inodes(
struct xfs_mount *mp,
struct xfs_eofblocks *eofb)
{
- return __xfs_inode_walk(mp, XFS_INODE_WALK_INACTIVE,
+ int error;
+
+ error = __xfs_inode_walk(mp, XFS_INODE_WALK_INACTIVE,
xfs_inactive_inode, eofb, XFS_ICI_INACTIVE_TAG);
+ wake_up(&mp->m_inactive_wait);
+ return error;
}
/* Try to get inode inactivation moving. */
@@ -2277,6 +2282,7 @@ xfs_inactive_worker(
if (error && error != -EAGAIN)
xfs_err(mp, "inode inactivation failed, error %d", error);
+ wake_up(&mp->m_inactive_wait);
sb_end_write(mp->m_super);
xfs_inactive_work_queue(pag);
}
@@ -2355,3 +2361,42 @@ xfs_inactive_schedule_now(
spin_unlock(&pag->pag_ici_lock);
}
}
+
+/* Return true if there are inodes still being inactivated. */
+static bool
+xfs_inactive_pending(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno = 0;
+ bool ret = false;
+
+ while (!ret &&
+ (pag = xfs_perag_get_tag(mp, agno, XFS_ICI_INACTIVE_TAG))) {
+ agno = pag->pag_agno + 1;
+ spin_lock(&pag->pag_ici_lock);
+ if (pag->pag_ici_inactive)
+ ret = true;
+ spin_unlock(&pag->pag_ici_lock);
+ xfs_perag_put(pag);
+ }
+
+ return ret;
+}
+
+/*
+ * Flush all pending inactivation work and poll until finished. This function
+ * is for callers that must flush with vfs locks held, such as unmount,
+ * remount, and iunlinks processing during mount.
+ */
+void
+xfs_inactive_force_poll(
+ struct xfs_mount *mp)
+{
+ xfs_inactive_schedule_now(mp);
+
+ while (!wait_event_timeout(mp->m_inactive_wait,
+ xfs_inactive_pending(mp) == false, HZ)) {
+ touch_softlockup_watchdog();
+ }
+}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 5850bf885452..6031208a492d 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -84,4 +84,6 @@ void xfs_inactive_shutdown(struct xfs_mount *mp);
void xfs_inactive_cancel_work(struct xfs_mount *mp);
void xfs_inactive_schedule_now(struct xfs_mount *mp);
+void xfs_inactive_force_poll(struct xfs_mount *mp);
+
#endif
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 702a6541c376..c9eaabad086d 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1066,7 +1066,7 @@ xfs_unmountfs(
* Since this can involve finobt updates, do it now before we lose the
* per-AG space reservations.
*/
- xfs_inactive_force(mp);
+ xfs_inactive_force_poll(mp);
xfs_blockgc_stop(mp);
xfs_fs_unreserve_ag_blocks(mp);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index c67cf932ef0c..c573f0d044f0 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -206,6 +206,11 @@ typedef struct xfs_mount {
unsigned int *m_errortag;
struct xfs_kobj m_errortag_kobj;
#endif
+ /*
+ * Use this to wait for the inode inactivation workqueue to finish
+ * inactivating all the inodes.
+ */
+ struct wait_queue_head m_inactive_wait;
} xfs_mount_t;
#define M_IGEO(mp) (&(mp)->m_ino_geo)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index c021618f7588..978e01e0b9c6 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1725,7 +1725,7 @@ xfs_remount_ro(
* Since this can involve finobt updates, do it now before we lose the
* per-AG space reservations.
*/
- xfs_inactive_force(mp);
+ xfs_inactive_force_poll(mp);
/* Free the per-AG metadata reservation pool. */
error = xfs_fs_unreserve_ag_blocks(mp);
@@ -1848,6 +1848,7 @@ static int xfs_init_fs_context(
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
mp->m_kobj.kobject.kset = xfs_kset;
+ init_waitqueue_head(&mp->m_inactive_wait);
/*
* We don't create the finobt per-ag space reservation until after log
* recovery, so we must set this to true so that an ifree transaction