Diffstat (limited to 'fs')
-rw-r--r--   fs/bcachefs/alloc_background.c |  2
-rw-r--r--   fs/bcachefs/btree_gc.c         | 31
-rw-r--r--   fs/bcachefs/btree_io.c         | 50
-rw-r--r--   fs/bcachefs/btree_node_scan.c  | 29
-rw-r--r--   fs/bcachefs/errcode.h          |  6
-rw-r--r--   fs/bcachefs/extents.c          |  4
-rw-r--r--   fs/bcachefs/fsck.c             |  7
-rw-r--r--   fs/bcachefs/io_write.c         |  7
-rw-r--r--   fs/bcachefs/journal.c          |  3
-rw-r--r--   fs/bcachefs/journal_io.c       |  2
-rw-r--r--   fs/bcachefs/recovery.c         |  7
-rw-r--r--   fs/bcachefs/super.c            |  2
12 files changed, 83 insertions, 67 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index f1d35b7f3fc5..d5cb07c7f4d6 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -389,7 +389,7 @@ void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
 	if (k.k->type == KEY_TYPE_alloc_v4) {
 		void *src, *dst;
 
-		*out = *bkey_s_c_to_alloc_v4(k).v;
+		bkey_val_copy(out, bkey_s_c_to_alloc_v4(k));
 
 		src = alloc_v4_backpointers(out);
 		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 34cb8a4324dc..e95bb6849aef 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -44,10 +44,6 @@
 #include <linux/rcupdate.h>
 #include <linux/sched/task.h>
 
-#define DROP_THIS_NODE		10
-#define DROP_PREV_NODE		11
-#define DID_FILL_FROM_SCAN	12
-
 /*
  * Returns true if it's a btree we can easily reconstruct, or otherwise won't
  * cause data loss if it's missing:
@@ -252,7 +248,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
 				return ret;
 
 			*pulled_from_scan = cur->data->min_key;
-			ret = DID_FILL_FROM_SCAN;
+			ret = bch_err_throw(c, topology_repair_did_fill_from_scan);
 		} else {
 			if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
 					     "btree node with incorrect min_key%s", buf.buf))
@@ -263,7 +259,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
 		if (bpos_ge(prev->data->min_key, cur->data->min_key)) { /* fully? */
 			if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_next_node,
 					     "btree node overwritten by next node%s", buf.buf))
-				ret = DROP_PREV_NODE;
+				ret = bch_err_throw(c, topology_repair_drop_prev_node);
 		} else {
 			if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
 					     "btree node with incorrect max_key%s", buf.buf))
@@ -274,7 +270,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
 		if (bpos_ge(expected_start, cur->data->max_key)) { /* fully? */
 			if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_prev_node,
 					     "btree node overwritten by prev node%s", buf.buf))
-				ret = DROP_THIS_NODE;
+				ret = bch_err_throw(c, topology_repair_drop_this_node);
 		} else {
 			if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
 					     "btree node with incorrect min_key%s", buf.buf))
@@ -314,7 +310,7 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
 			return ret;
 
 		*pulled_from_scan = b->key.k.p;
-		ret = DID_FILL_FROM_SCAN;
+		ret = bch_err_throw(c, topology_repair_did_fill_from_scan);
 	} else {
 		ret = set_node_max(c, child, b->key.k.p);
 	}
@@ -391,15 +387,15 @@ again:
 		ret = lockrestart_do(trans,
 			btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan));
-		if (ret < 0)
+		if (ret && !bch2_err_matches(ret, BCH_ERR_topology_repair))
 			goto err;
 
-		if (ret == DID_FILL_FROM_SCAN) {
+		if (bch2_err_matches(ret, BCH_ERR_topology_repair_did_fill_from_scan)) {
 			new_pass = true;
 			ret = 0;
 		}
 
-		if (ret == DROP_THIS_NODE) {
+		if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) {
 			six_unlock_read(&cur->c.lock);
 			bch2_btree_node_evict(trans, cur_k.k);
 			ret = bch2_journal_key_delete(c, b->c.btree_id,
@@ -414,7 +410,7 @@ again:
 			six_unlock_read(&prev->c.lock);
 			prev = NULL;
 
-			if (ret == DROP_PREV_NODE) {
+			if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_prev_node)) {
 				bch_info(c, "dropped prev node");
 				bch2_btree_node_evict(trans, prev_k.k);
 				ret = bch2_journal_key_delete(c, b->c.btree_id,
@@ -436,7 +432,7 @@ again:
 		BUG_ON(cur);
 		ret = lockrestart_do(trans,
 			btree_repair_node_end(trans, b, prev, pulled_from_scan));
-		if (ret == DID_FILL_FROM_SCAN) {
+		if (bch2_err_matches(ret, BCH_ERR_topology_repair_did_fill_from_scan)) {
 			new_pass = true;
 			ret = 0;
 		}
@@ -477,7 +473,7 @@ again:
 		six_unlock_read(&cur->c.lock);
 		cur = NULL;
 
-		if (ret == DROP_THIS_NODE) {
+		if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) {
 			bch2_btree_node_evict(trans, cur_k.k);
 			ret = bch2_journal_key_delete(c, b->c.btree_id,
 						      b->c.level, cur_k.k->k.p);
@@ -504,7 +500,7 @@ again:
 	if (mustfix_fsck_err_on(!have_child,
 				c, btree_node_topology_interior_node_empty,
 				"empty interior btree node at %s", buf.buf))
-		ret = DROP_THIS_NODE;
+		ret = bch_err_throw(c, topology_repair_drop_this_node);
 err:
 fsck_err:
 	if (!IS_ERR_OR_NULL(prev))
@@ -521,7 +517,8 @@ fsck_err:
 	bch2_bkey_buf_exit(&prev_k, c);
 	bch2_bkey_buf_exit(&cur_k, c);
 
-	bch_err_fn(c, ret);
+	if (!bch2_err_matches(ret, BCH_ERR_topology_repair))
+		bch_err_fn(c, ret);
 	return ret;
 }
@@ -592,7 +589,7 @@ recover:
 		ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
 		six_unlock_read(&b->c.lock);
 
-		if (ret == DROP_THIS_NODE) {
+		if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) {
 			scoped_guard(mutex, &c->btree_cache.lock)
 				bch2_btree_node_hash_remove(&c->btree_cache, b);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index bd86dd7151a1..2bd8422eb72c 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -563,15 +563,11 @@ static int __btree_err(int ret,
 			struct printbuf *err_msg,
 			const char *fmt, ...)
 {
-	if (c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
-		return ret == -BCH_ERR_btree_node_read_err_fixable
-			? bch_err_throw(c, fsck_fix)
-			: ret;
-
+	bool in_scan = c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
 	bool have_retry = false;
 	int ret2;
 
-	if (ca) {
+	if (ca && !in_scan) {
 		bch2_mark_btree_validate_failure(failed, ca->dev_idx);
 
 		struct extent_ptr_decoded pick;
@@ -585,12 +581,14 @@ static int __btree_err(int ret,
 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
 		ret = bch_err_throw(c, btree_node_read_err_bad_node);
 
-	bch2_sb_error_count(c, err_type);
+	if (!in_scan)
+		bch2_sb_error_count(c, err_type);
 
 	bool print_deferred = err_msg &&
 		rw == READ &&
-		!(test_bit(BCH_FS_in_fsck, &c->flags) &&
-		  c->opts.fix_errors == FSCK_FIX_ask);
+		(!(test_bit(BCH_FS_in_fsck, &c->flags) &&
+		   c->opts.fix_errors == FSCK_FIX_ask) ||
+		 in_scan);
 
 	CLASS(printbuf, out)();
 	bch2_log_msg_start(c, &out);
@@ -603,11 +601,17 @@ static int __btree_err(int ret,
 		va_list args;
 		va_start(args, fmt);
 		prt_vprintf(err_msg, fmt, args);
-		va_end(args);
+		va_end(args);;
 
-	if (print_deferred) {
+	if (print_deferred)
 		prt_newline(err_msg);
 
+	if (in_scan)
+		return ret == -BCH_ERR_btree_node_read_err_fixable
+			? bch_err_throw(c, fsck_fix)
+			: ret;
+
+	if (print_deferred) {
 		switch (ret) {
 		case -BCH_ERR_btree_node_read_err_fixable:
 			ret2 = bch2_fsck_err_opt(c, FSCK_CAN_FIX, err_type);
@@ -1405,10 +1409,8 @@ static void btree_node_read_work(struct work_struct *work)
 		ret = bch2_bkey_pick_read_device(c,
 					bkey_i_to_s_c(&b->key),
 					&failed, &rb->pick, -1);
-		if (ret <= 0) {
-			set_btree_node_read_error(b);
+		if (ret <= 0)
 			break;
-		}
 
 		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);
 		rb->have_ioref = ca != NULL;
@@ -1442,27 +1444,21 @@ start:
 		bch2_maybe_corrupt_bio(bio, bch2_btree_read_corrupt_ratio);
 
 		ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf);
-		if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
-		    ret == -BCH_ERR_btree_node_read_err_must_retry)
-			continue;
-
-		if (ret)
-			set_btree_node_read_error(b);
-
-		break;
+		if (ret != -BCH_ERR_btree_node_read_err_want_retry &&
+		    ret != -BCH_ERR_btree_node_read_err_must_retry)
+			break;
 	}
 
 	bch2_io_failures_to_text(&buf, c, &failed);
 
-	if (btree_node_read_error(b))
-		bch2_btree_lost_data(c, &buf, b->c.btree_id);
-
 	/*
 	 * only print retry success if we read from a replica with no errors
 	 */
-	if (btree_node_read_error(b))
+	if (ret) {
+		set_btree_node_read_error(b);
+		bch2_btree_lost_data(c, &buf, b->c.btree_id);
 		prt_printf(&buf, "ret %s", bch2_err_str(ret));
-	else if (failed.nr) {
+	} else if (failed.nr) {
 		if (!bch2_dev_io_failures(&failed, rb->pick.ptr.dev))
 			prt_printf(&buf, "retry success");
 		else
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index f4f958f4615d..c518e18ce5dc 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -155,17 +155,6 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
 	if (BTREE_NODE_LEVEL(bn) >= BTREE_MAX_DEPTH)
 		return;
 
-	if (BTREE_NODE_ID(bn) >= BTREE_ID_NR_MAX)
-		return;
-
-	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ);
-	bio->bi_iter.bi_sector	= offset;
-	bch2_bio_map(bio, b->data, c->opts.btree_node_size);
-
-	submit_time = local_clock();
-	submit_bio_wait(bio);
-	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status);
-
 	rcu_read_lock();
 	struct found_btree_node n = {
 		.btree_id	= BTREE_NODE_ID(bn),
@@ -182,10 +171,26 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
 	};
 	rcu_read_unlock();
 
+	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ);
+	bio->bi_iter.bi_sector	= offset;
+	bch2_bio_map(bio, b->data, c->opts.btree_node_size);
+
+	submit_time = local_clock();
+	submit_bio_wait(bio);
+	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status);
+
 	found_btree_node_to_key(&b->key, &n);
 
 	CLASS(printbuf, buf)();
-	if (!bch2_btree_node_read_done(c, ca, b, NULL, &buf)) {
+
+	found_btree_node_to_text(&buf, c, &n);
+	prt_newline(&buf);
+
+	int ret = bch2_btree_node_read_done(c, ca, b, NULL, &buf);
+
+	bch_verbose(ca, "attempted to read, ret %s\n%s", bch2_err_str(ret), buf.buf);
+
+	if (!ret) {
 		/* read_done will swap out b->data for another buffer */
 		bn = b->data;
 		/*
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 5db48c01f8e6..cec8b0f47d3d 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -218,9 +218,13 @@
 	x(EINVAL,		varint_decode_error)			\
 	x(EINVAL,		erasure_coding_found_btree_node)	\
 	x(EINVAL,		option_negative)			\
+	x(EINVAL,		topology_repair)			\
+	x(BCH_ERR_topology_repair, topology_repair_drop_this_node)	\
+	x(BCH_ERR_topology_repair, topology_repair_drop_prev_node)	\
+	x(BCH_ERR_topology_repair, topology_repair_did_fill_from_scan)	\
 	x(EOPNOTSUPP,		may_not_use_incompat_feature)		\
 	x(EOPNOTSUPP,		no_casefolding_without_utf8)		\
-	x(EOPNOTSUPP,		casefolding_disabled)			\
+	x(EOPNOTSUPP,		casefolding_disabled)			\
 	x(EOPNOTSUPP,		casefold_opt_is_dir_only)		\
 	x(EOPNOTSUPP,		unsupported_fsx_flag)			\
 	x(EOPNOTSUPP,		unsupported_fa_flag)			\
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 8152ef1cbbcd..b879a586b7f6 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -282,9 +282,9 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
 
 	if (have_pick)
 		return 1;
-	if (!have_dirty_ptrs)
+	if (!have_dirty_ptrs && !bkey_is_btree_ptr(k.k))
 		return 0;
-	if (have_missing_devs)
+	if (have_missing_devs || !have_dirty_ptrs)
 		return bch_err_throw(c, no_device_to_read_from);
 	if (have_csum_errors)
 		return bch_err_throw(c, data_read_csum_err);
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index df0aa2522b18..183b88bbd402 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -1975,6 +1975,10 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 		}
 	}
 
+	ret = check_extent_overbig(trans, iter, k);
+	if (ret)
+		goto err;
+
 	ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc);
 	if (ret)
 		goto err;
@@ -2021,8 +2025,7 @@ int bch2_check_extents(struct bch_fs *c)
 				POS(BCACHEFS_ROOT_INO, 0),
 				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
 			bch2_disk_reservation_put(c, &res);
-			check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?:
-			check_extent_overbig(trans, &iter, k);
+			check_extent(trans, &iter, k, &w, &s, &extent_ends, &res);
 		})) ?:
 	check_i_sectors_notnested(trans, &w);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index d7620138e038..44b02d4b6502 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -89,7 +89,12 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
 		new = ewma_add(old, io_latency, 5);
 	} while (!atomic64_try_cmpxchg(latency, &old, new));
 
-	bch2_congested_acct(ca, io_latency, now, rw);
+	/*
+	 * Only track read latency for congestion accounting: writes are subject
+	 * to heavy queuing delays from page cache writeback:
+	 */
+	if (rw == READ)
+		bch2_congested_acct(ca, io_latency, now, rw);
 
 	__bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
 }
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 376c9b3b20fe..97760e89e5a3 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -198,7 +198,8 @@ void bch2_journal_do_writes(struct journal *j)
 		if (!journal_state_seq_count(j, j->reservations, seq)) {
 			j->seq_write_started = seq;
 			w->write_started = true;
-			closure_call(&w->io, bch2_journal_write, j->wq, &c->cl);
+			closure_get(&c->cl);
+			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
 		}
 
 		break;
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index bb4a1a7e48e5..47224666d07e 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1821,7 +1821,7 @@ static CLOSURE_CALLBACK(journal_write_done)
 	if (do_discards)
 		bch2_do_discards(c);
 
-	closure_return(cl);
+	closure_put(&c->cl);
 }
 
 static void journal_write_endio(struct bio *bio)
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 58c159e5f10d..304473dac268 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -67,13 +67,16 @@ int bch2_btree_lost_data(struct bch_fs *c,
 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_backpointers_to_extents, 0, &write_sb) ?: ret;
 #endif
 
+	write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent);
+	write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_backpointer_to_missing_ptr, ext->errors_silent);
+	write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
+	write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
+
 	switch (btree) {
 	case BTREE_ID_alloc:
 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0, &write_sb) ?: ret;
 
-		write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
 		write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
-		write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
 		write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
 		write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
 		write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 0fc0b2221036..b3b2d8353a36 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -729,6 +729,8 @@ void __bch2_fs_stop(struct bch_fs *c)
 		cancel_work_sync(&ca->io_error_work);
 
 	cancel_work_sync(&c->read_only_work);
+
+	flush_work(&c->btree_interior_update_work);
 }
 
 void bch2_fs_free(struct bch_fs *c)