author     Darrick J. Wong <djwong@kernel.org>	2021-07-09 11:05:48 -0700
committer  Darrick J. Wong <djwong@kernel.org>	2021-08-25 22:25:59 -0700
commit     4cf18001845083e35676c913ddcc7ae41b914b64 (patch)
tree       d08c99508a485e9787d43603e0f13c879fa0d90c
parent     176fd00779949a9f87cfc7f157f78eb86c6c0d84 (diff)
xfs: clear log incompat feature bits when the log is idle	(log-use-incompat-features_2021-08-25)
When there are no ongoing transactions and the log contents have been checkpointed back into the filesystem, the log performs 'covering', which is to say that it logs a dummy transaction to record the fact that the tail has caught up with the head.  This is a good time to clear log incompat feature flags, because they are flags that are temporarily set to limit the range of kernels that can replay a dirty log.

Since it's possible that some other higher level thread is about to start logging items protected by a log incompat flag, we create a rwsem so that upper level threads can coordinate this with the log.  It would probably be more performant to use a percpu rwsem, but the ability to /try/ taking the write lock during covering is critical, and percpu rwsems do not provide that.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
-rw-r--r--	fs/xfs/xfs_log.c	| 49
-rw-r--r--	fs/xfs/xfs_log.h	|  3
-rw-r--r--	fs/xfs/xfs_log_priv.h	|  3
3 files changed, 55 insertions(+), 0 deletions(-)
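
For context, the calling convention described above looks roughly like the sketch below. This is not part of the patch: the caller, the xfs_add_incompat_log_feature() helper, and the XFS_SB_FEAT_INCOMPAT_LOG_EXAMPLE flag are placeholders for whatever the rest of the series actually provides; only xlog_use_incompat_feat() and xlog_drop_incompat_feat() come from this commit.

/*
 * Sketch of a higher-level caller (not part of this patch).  Holding the
 * read side of l_incompat_users keeps xlog_clear_incompat() from clearing
 * the feature bit while we are still logging items that depend on it.
 */
static int
xfs_example_log_protected_item(
	struct xfs_mount	*mp)
{
	int			error;

	/* Block log covering from clearing incompat flags until we finish. */
	xlog_use_incompat_feat(mp->m_log);

	/* Placeholder: set the on-disk incompat log feature bit if needed. */
	error = xfs_add_incompat_log_feature(mp,
			XFS_SB_FEAT_INCOMPAT_LOG_EXAMPLE);
	if (error)
		goto out_drop;

	/* ... run transactions that log the protected items here ... */

out_drop:
	xlog_drop_incompat_feat(mp->m_log);
	return error;
}

Using a plain rwsem rather than a percpu one is what lets xlog_clear_incompat() use down_write_trylock() and back off immediately whenever such a caller is active.
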
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c70161efe6bb..3cf265c5374e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1363,6 +1363,32 @@ xfs_log_work_queue(
}
/*
+ * Clear the log incompat flags if we have the opportunity.
+ *
+ * This only happens if we're about to log the second dummy transaction as part
+ * of covering the log and we can get the log incompat feature usage lock.
+ */
+static inline void
+xlog_clear_incompat(
+ struct xlog *log)
+{
+ struct xfs_mount *mp = log->l_mp;
+
+ if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
+ XFS_SB_FEAT_INCOMPAT_LOG_ALL))
+ return;
+
+ if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
+ return;
+
+ if (!down_write_trylock(&log->l_incompat_users))
+ return;
+
+ xfs_clear_incompat_log_features(mp);
+ up_write(&log->l_incompat_users);
+}
+
+/*
* Every sync period we need to unpin all items in the AIL and push them to
* disk. If there is nothing dirty, then we might need to cover the log to
* indicate that the filesystem is idle.
@@ -1388,6 +1414,7 @@ xfs_log_worker(
* synchronously log the superblock instead to ensure the
* superblock is immediately unpinned and can be written back.
*/
+ xlog_clear_incompat(log);
xfs_sync_sb(mp, true);
} else
xfs_log_force(mp, 0);
@@ -1475,6 +1502,8 @@ xlog_alloc_log(
}
log->l_sectBBsize = 1 << log2_size;
+ init_rwsem(&log->l_incompat_users);
+
xlog_get_iclog_buffer_size(mp, log);
spin_lock_init(&log->l_icloglock);
@@ -3974,3 +4003,23 @@ xfs_log_in_recovery(
return log->l_flags & XLOG_ACTIVE_RECOVERY;
}
+
+/*
+ * Notify the log that we're about to start using a feature that is protected
+ * by a log incompat feature flag. This will prevent log covering from
+ * clearing those flags.
+ */
+void
+xlog_use_incompat_feat(
+ struct xlog *log)
+{
+ down_read(&log->l_incompat_users);
+}
+
+/* Notify the log that we've finished using log incompat features. */
+void
+xlog_drop_incompat_feat(
+ struct xlog *log)
+{
+ up_read(&log->l_incompat_users);
+}
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 813b972e9788..b274fb9dcd8d 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -142,4 +142,7 @@ bool xfs_log_in_recovery(struct xfs_mount *);
xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
+void xlog_use_incompat_feat(struct xlog *log);
+void xlog_drop_incompat_feat(struct xlog *log);
+
#endif /* __XFS_LOG_H__ */
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index f3e79a45d60a..6953f86f866c 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -456,6 +456,9 @@ struct xlog {
xfs_lsn_t l_recovery_lsn;
uint32_t l_iclog_roundoff;/* padding roundoff */
+
+ /* Users of log incompat features should take a read lock. */
+ struct rw_semaphore l_incompat_users;
};
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \