author		Kent Overstreet <kent.overstreet@linux.dev>	2024-12-04 19:46:35 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2024-12-04 20:53:04 -0500
commit		a72b9ffec35f82cc214c23dbfb5955a59b0c94fc (patch)
tree		e729607ad549ef231788f4dbcc3cacae1490ab47
parent		fb92391ef70a46f9f17b766115075dc8c7cef44c (diff)
bcachefs: Make sure __bch2_run_explicit_recovery_pass() signals to rewind
We should always signal to rewind if the requested pass hasn't been run,
even if called multiple times.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--	fs/bcachefs/bcachefs.h		|  1 +
-rw-r--r--	fs/bcachefs/recovery_passes.c	| 26 +++++++++++++-------------
2 files changed, 14 insertions(+), 13 deletions(-)
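
In effect, the guard at the top of __bch2_run_explicit_recovery_pass() moves from "this pass was already requested" (opts.recovery_passes) to "this pass actually completed" (recovery_passes_complete). A minimal sketch of that distinction, with hypothetical names; not the kernel code:

#include <stdint.h>
#include <stdbool.h>

#define BIT_ULL(nr)	(1ULL << (nr))

/* Before: a repeat request for a pass that had merely been *requested*
 * returned 0, so the caller never saw -BCH_ERR_restart_recovery. */
static bool old_short_circuit(uint64_t requested_mask, unsigned pass)
{
	return requested_mask & BIT_ULL(pass);	/* opts.recovery_passes */
}

/* After: we only short-circuit once the pass has actually run, so every
 * caller keeps getting the rewind signal until it does. */
static bool new_short_circuit(uint64_t complete_mask, unsigned pass)
{
	return complete_mask & BIT_ULL(pass);	/* recovery_passes_complete */
}

The requested bit is still set (see the second hunk below); it just no longer suppresses the restart signal.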
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index b12c9c78beec..e6cd93e1ed0f 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -1044,6 +1044,7 @@ struct bch_fs {
 	 * for signaling to the toplevel code which pass we want to run now.
 	 */
 	enum bch_recovery_pass	curr_recovery_pass;
+	enum bch_recovery_pass	next_recovery_pass;
 	/* bitmask of recovery passes that we actually ran */
 	u64			recovery_passes_complete;
 	/* never rewinds version of curr_recovery_pass */
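
For orientation, a paraphrased view of the cursor fields this hunk leaves in struct bch_fs; the comments are editorial, not from the patch:

enum bch_recovery_pass	curr_recovery_pass;	/* pass currently executing */
enum bch_recovery_pass	next_recovery_pass;	/* where the driver loop goes next;
						 * set below curr to stage a rewind */
u64			recovery_passes_complete; /* bitmask of passes that ran */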
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index f6d3a99cb63e..34d4299c2365 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -103,12 +103,12 @@ u64 bch2_recovery_passes_from_stable(u64 v)
 static int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
 					     enum bch_recovery_pass pass)
 {
-	if (c->opts.recovery_passes & BIT_ULL(pass))
-		return 0;
-
 	if (c->curr_recovery_pass == ARRAY_SIZE(recovery_pass_fns))
 		return -BCH_ERR_not_in_recovery;
 
+	if (c->recovery_passes_complete & BIT_ULL(pass))
+		return 0;
+
 	if (pass < BCH_RECOVERY_PASS_set_may_go_rw &&
 	    c->curr_recovery_pass >= BCH_RECOVERY_PASS_set_may_go_rw) {
 		bch_info(c, "need recovery pass %s (%u), but already rw",
@@ -122,8 +122,8 @@ static int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
 
 	c->opts.recovery_passes |= BIT_ULL(pass);
 
-	if (c->curr_recovery_pass >= pass) {
-		c->curr_recovery_pass = pass;
+	if (c->curr_recovery_pass > pass) {
+		c->next_recovery_pass = pass;
 		c->recovery_passes_complete &= (1ULL << pass) >> 1;
 		return -BCH_ERR_restart_recovery;
 	} else {
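
Two things change in this hunk: the comparison tightens from >= to > (rewinding to the pass that is currently running would be pointless), and the rewind is staged in next_recovery_pass rather than mutating curr_recovery_pass directly, leaving the driver loop to commit it under the lock. A standalone sketch of the decision, illustrative names only:

#include <stdio.h>

/* Illustration of the rewind decision above; not kernel code. */
enum { ERR_restart_recovery = 1 };

struct fs_sketch {
	unsigned curr;	/* curr_recovery_pass */
	unsigned next;	/* next_recovery_pass */
};

static int request_pass(struct fs_sketch *c, unsigned pass)
{
	if (c->curr > pass) {	/* was >=: no point rewinding to the running pass */
		c->next = pass;	/* staged; the driver loop commits it */
		return -ERR_restart_recovery;
	}
	return 0;		/* pass will run (or is running) anyway */
}

int main(void)
{
	struct fs_sketch c = { .curr = 10, .next = 11 };

	/* An earlier pass is requested mid-run: a rewind is staged. */
	printf("%d next=%u\n", request_pass(&c, 7), c.next);	/* -1 next=7 */

	/* The same request again still signals rewind -- the fix. */
	printf("%d next=%u\n", request_pass(&c, 7), c.next);	/* -1 next=7 */
	return 0;
}

Because the early-return guard now keys off recovery_passes_complete, the repeated call still stages the rewind rather than returning 0, which is exactly what the commit message asks for.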
@@ -265,6 +265,8 @@ int bch2_run_recovery_passes(struct bch_fs *c)
 	c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw;
 
 	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
+		c->next_recovery_pass = c->curr_recovery_pass + 1;
+
 		spin_lock_irq(&c->recovery_pass_lock);
 		unsigned pass = c->curr_recovery_pass;
 
@@ -286,27 +288,25 @@ int bch2_run_recovery_passes(struct bch_fs *c)
 			bch2_journal_flush(&c->journal);
 
 		spin_lock_irq(&c->recovery_pass_lock);
-		if (c->curr_recovery_pass < pass) {
+		if (c->next_recovery_pass < c->curr_recovery_pass) {
 			/*
 			 * bch2_run_explicit_recovery_pass() was called: we
 			 * can't always catch -BCH_ERR_restart_recovery because
 			 * it may have been called from another thread (btree
 			 * node read completion)
 			 */
-			spin_unlock_irq(&c->recovery_pass_lock);
-			continue;
-		} else if (c->curr_recovery_pass == pass) {
-			c->curr_recovery_pass++;
+			ret = 0;
+			c->recovery_passes_complete &= ~(~0ULL << c->curr_recovery_pass);
 		} else {
-			BUG();
+			c->recovery_passes_complete |= BIT_ULL(pass);
+			c->recovery_pass_done = max(c->recovery_pass_done, pass);
 		}
+		c->curr_recovery_pass = c->next_recovery_pass;
 		spin_unlock_irq(&c->recovery_pass_lock);
 
 		if (ret)
 			break;
 
-		c->recovery_passes_complete |= BIT_ULL(pass);
-		c->recovery_pass_done = max(c->recovery_pass_done, pass);
-
 		if (!test_bit(BCH_FS_error, &c->flags))
 			bch2_clear_recovery_pass_required(c, pass);
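
Taken together, the loop now works as a publish/commit handshake on the two cursors: each iteration publishes next_recovery_pass = curr + 1, runs the pass, and then, under recovery_pass_lock, either observes a staged rewind (next < curr: discard ret and forget completions from curr onward) or records completion; curr_recovery_pass = next_recovery_pass is the single commit point. A compressed, non-verbatim sketch with locking, journal flushing, and error details elided:

#include <stdint.h>

struct loop_sketch {
	unsigned	curr;		/* curr_recovery_pass */
	unsigned	next;		/* next_recovery_pass */
	uint64_t	complete;	/* recovery_passes_complete */
};

static int run_passes(struct loop_sketch *c, unsigned npasses,
		      int (*run_pass)(struct loop_sketch *, unsigned))
{
	int ret = 0;

	while (c->curr < npasses) {
		c->next = c->curr + 1;		/* published: default is to advance */

		ret = run_pass(c, c->curr);	/* may stage c->next < c->curr,
						 * possibly from another thread */

		if (c->next < c->curr) {
			/* rewind staged: discard ret, forget completions
			 * from the current pass onward */
			ret = 0;
			c->complete &= ~(~0ULL << c->curr);
		} else {
			c->complete |= 1ULL << c->curr;	/* pass really ran */
		}
		c->curr = c->next;		/* the single commit point */

		if (ret)
			break;
	}
	return ret;
}

Folding the rewind into the same locked commit as normal advancement is what removes the old three-way branch (and its BUG()): every outcome is just a different value of next_recovery_pass.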