Diffstat (limited to 'fs/bcachefs')
-rw-r--r--  fs/bcachefs/alloc_background.c | 18
-rw-r--r--  fs/bcachefs/btree_gc.c         |  2
-rw-r--r--  fs/bcachefs/btree_io.c         | 26
-rw-r--r--  fs/bcachefs/btree_node_scan.c  | 13
-rw-r--r--  fs/bcachefs/fs-io-direct.c     |  2
-rw-r--r--  fs/bcachefs/fsck.c             |  7
-rw-r--r--  fs/bcachefs/io_write.c         |  7
-rw-r--r--  fs/bcachefs/journal.c          |  3
-rw-r--r--  fs/bcachefs/journal_io.c       |  2
-rw-r--r--  fs/bcachefs/super.c            |  2
10 files changed, 55 insertions(+), 27 deletions(-)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 4c1604fd80f9..d5cb07c7f4d6 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -337,9 +337,10 @@ void bch2_alloc_v4_swab(struct bkey_s k)
 }
 
 static inline void __bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *c,
-                                           unsigned dev, const struct bch_alloc_v4 *a)
+                                           struct bkey_s_c k,
+                                           const struct bch_alloc_v4 *a)
 {
-        struct bch_dev *ca = c ? bch2_dev_tryget_noerror(c, dev) : NULL;
+        struct bch_dev *ca = c ? bch2_dev_tryget_noerror(c, k.k->p.inode) : NULL;
 
         prt_newline(out);
         printbuf_indent_add(out, 2);
@@ -348,11 +349,14 @@ static inline void __bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *
         bch2_prt_data_type(out, a->data_type);
         prt_newline(out);
         prt_printf(out, "journal_seq_nonempty %llu\n", a->journal_seq_nonempty);
-        prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty);
+        if (bkey_val_bytes(k.k) > offsetof(struct bch_alloc_v4, journal_seq_empty))
+                prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty);
+
         prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
         prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
         prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
-        prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
+        if (bkey_val_bytes(k.k) > offsetof(struct bch_alloc_v4, stripe_sectors))
+                prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
         prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
         prt_printf(out, "stripe %u\n", a->stripe);
         prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
@@ -372,12 +376,12 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
         struct bch_alloc_v4 _a;
         const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
 
-        __bch2_alloc_v4_to_text(out, c, k.k->p.inode, a);
+        __bch2_alloc_v4_to_text(out, c, k, a);
 }
 
 void bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
-        __bch2_alloc_v4_to_text(out, c, k.k->p.inode, bkey_s_c_to_alloc_v4(k).v);
+        __bch2_alloc_v4_to_text(out, c, k, bkey_s_c_to_alloc_v4(k).v);
 }
 
 void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
@@ -385,7 +389,7 @@ void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
         if (k.k->type == KEY_TYPE_alloc_v4) {
                 void *src, *dst;
 
-                *out = *bkey_s_c_to_alloc_v4(k).v;
+                bkey_val_copy(out, bkey_s_c_to_alloc_v4(k));
 
                 src = alloc_v4_backpointers(out);
                 SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index f094606722a3..e95bb6849aef 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -387,7 +387,7 @@ again:
                 ret = lockrestart_do(trans,
                         btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan));
-                if (ret < 0)
+                if (ret && !bch2_err_matches(ret, BCH_ERR_topology_repair))
                         goto err;
 
                 if (bch2_err_matches(ret, BCH_ERR_topology_repair_did_fill_from_scan)) {
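[Editor's note: the alloc_background.c hunks above guard fields that were appended to struct bch_alloc_v4 over time: a key written by an older version carries a shorter value, so trailing fields are printed only when bkey_val_bytes() says they are actually stored. A minimal standalone sketch of that version-tolerance pattern follows; the struct and field layout here are illustrative, not bcachefs's real definitions.]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A value type that grew a trailing field in a later on-disk version. */
struct alloc_val {
        uint64_t journal_seq_nonempty;
        uint64_t journal_seq_empty;     /* appended later; may be absent on disk */
};

/*
 * Only read a trailing field if the stored value is long enough to
 * contain it -- the same offsetof() comparison the hunk above adds.
 */
static void alloc_val_to_text(const struct alloc_val *v, size_t val_bytes)
{
        printf("journal_seq_nonempty %llu\n",
               (unsigned long long) v->journal_seq_nonempty);

        if (val_bytes > offsetof(struct alloc_val, journal_seq_empty))
                printf("journal_seq_empty %llu\n",
                       (unsigned long long) v->journal_seq_empty);
}

int main(void)
{
        struct alloc_val v = { .journal_seq_nonempty = 42, .journal_seq_empty = 7 };

        /* New-format value: both fields stored */
        alloc_val_to_text(&v, sizeof(v));
        /* Old-format value: only the first field was stored */
        alloc_val_to_text(&v, offsetof(struct alloc_val, journal_seq_empty));
        return 0;
}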
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index bd86dd7151a1..6c243703972c 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -563,15 +563,11 @@ static int __btree_err(int ret,
                         struct printbuf *err_msg,
                         const char *fmt, ...)
 {
-        if (c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
-                return ret == -BCH_ERR_btree_node_read_err_fixable
-                        ? bch_err_throw(c, fsck_fix)
-                        : ret;
-
+        bool in_scan = c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
         bool have_retry = false;
         int ret2;
 
-        if (ca) {
+        if (ca && !in_scan) {
                 bch2_mark_btree_validate_failure(failed, ca->dev_idx);
 
                 struct extent_ptr_decoded pick;
@@ -585,12 +581,14 @@ static int __btree_err(int ret,
         if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
                 ret = bch_err_throw(c, btree_node_read_err_bad_node);
 
-        bch2_sb_error_count(c, err_type);
+        if (!in_scan)
+                bch2_sb_error_count(c, err_type);
 
         bool print_deferred = err_msg &&
                 rw == READ &&
-                !(test_bit(BCH_FS_in_fsck, &c->flags) &&
-                  c->opts.fix_errors == FSCK_FIX_ask);
+                (!(test_bit(BCH_FS_in_fsck, &c->flags) &&
+                   c->opts.fix_errors == FSCK_FIX_ask) ||
+                 in_scan);
 
         CLASS(printbuf, out)();
         bch2_log_msg_start(c, &out);
@@ -603,11 +601,17 @@ static int __btree_err(int ret,
         va_list args;
         va_start(args, fmt);
         prt_vprintf(err_msg, fmt, args);
-        va_end(args);
+        va_end(args);;
 
-        if (print_deferred) {
+        if (print_deferred)
                 prt_newline(err_msg);
 
+        if (in_scan)
+                return ret == -BCH_ERR_btree_node_read_err_fixable
+                        ? bch_err_throw(c, fsck_fix)
+                        : ret;
+
+        if (print_deferred) {
                 switch (ret) {
                 case -BCH_ERR_btree_node_read_err_fixable:
                         ret2 = bch2_fsck_err_opt(c, FSCK_CAN_FIX, err_type);
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index f4f958f4615d..71fcc8a9152a 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -155,9 +155,6 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
         if (BTREE_NODE_LEVEL(bn) >= BTREE_MAX_DEPTH)
                 return;
 
-        if (BTREE_NODE_ID(bn) >= BTREE_ID_NR_MAX)
-                return;
-
         bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ);
         bio->bi_iter.bi_sector = offset;
         bch2_bio_map(bio, b->data, c->opts.btree_node_size);
@@ -185,7 +182,15 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
         found_btree_node_to_key(&b->key, &n);
 
         CLASS(printbuf, buf)();
-        if (!bch2_btree_node_read_done(c, ca, b, NULL, &buf)) {
+
+        found_btree_node_to_text(&buf, c, &n);
+        prt_newline(&buf);
+
+        int ret = bch2_btree_node_read_done(c, ca, b, NULL, &buf);
+
+        bch_verbose(ca, "attempted to read, ret %s\n%s", bch2_err_str(ret), buf.buf);
+
+        if (!ret) {
                 /* read_done will swap out b->data for another buffer */
                 bn = b->data;
                 /*
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 73d44875faf2..e53fee0513fd 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -127,7 +127,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
          * the dirtying of requests that are internal from the kernel (i.e. from
          * loopback), because we'll deadlock on page_lock.
          */
-        dio->should_dirty = iter_is_iovec(iter);
+        dio->should_dirty = user_backed_iter(iter);
 
         blk_start_plug(&plug);
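[Editor's note: the fs-io-direct.c fix matters because iter_is_iovec() matches only ITER_IOVEC iterators, while recent kernels submit single-segment user reads as ITER_UBUF; those were being treated as kernel-internal and their pages never dirtied. user_backed_iter() tests the iterator's user_backed flag, which covers both flavors. A toy model of the distinction follows; these are not the kernel's actual definitions, which live in include/linux/uio.h.]

#include <stdbool.h>
#include <stdio.h>

/* Toy model of iov_iter flavors */
enum iter_type { ITER_UBUF, ITER_IOVEC, ITER_KVEC, ITER_BVEC };

struct iov_iter {
        enum iter_type type;
        bool user_backed;       /* set for ITER_UBUF and ITER_IOVEC */
};

/* Old check: misses single-segment user buffers (ITER_UBUF) */
static bool iter_is_iovec(const struct iov_iter *i)
{
        return i->type == ITER_IOVEC;
}

/* New check: true for any iterator backed by user memory */
static bool user_backed_iter(const struct iov_iter *i)
{
        return i->user_backed;
}

int main(void)
{
        struct iov_iter ubuf = { .type = ITER_UBUF, .user_backed = true };

        /* Prints "iter_is_iovec: 0, user_backed_iter: 1" -- the old
         * check would have skipped dirtying this user buffer. */
        printf("iter_is_iovec: %d, user_backed_iter: %d\n",
               iter_is_iovec(&ubuf), user_backed_iter(&ubuf));
        return 0;
}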
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index df0aa2522b18..183b88bbd402 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -1975,6 +1975,10 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
                 }
         }
 
+        ret = check_extent_overbig(trans, iter, k);
+        if (ret)
+                goto err;
+
         ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc);
         if (ret)
                 goto err;
@@ -2021,8 +2025,7 @@ int bch2_check_extents(struct bch_fs *c)
                         POS(BCACHEFS_ROOT_INO, 0),
                         BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
                 bch2_disk_reservation_put(c, &res);
-                check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?:
-                check_extent_overbig(trans, &iter, k);
+                check_extent(trans, &iter, k, &w, &s, &extent_ends, &res);
         })) ?:
         check_i_sectors_notnested(trans, &w);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index d7620138e038..44b02d4b6502 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -89,7 +89,12 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
                 new = ewma_add(old, io_latency, 5);
         } while (!atomic64_try_cmpxchg(latency, &old, new));
 
-        bch2_congested_acct(ca, io_latency, now, rw);
+        /*
+         * Only track read latency for congestion accounting: writes are subject
+         * to heavy queuing delays from page cache writeback:
+         */
+        if (rw == READ)
+                bch2_congested_acct(ca, io_latency, now, rw);
 
         __bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
 }
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index de03e20f6e30..97760e89e5a3 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -182,6 +182,8 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
 
 void bch2_journal_do_writes(struct journal *j)
 {
+        struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
         for (u64 seq = journal_last_unwritten_seq(j);
              seq <= journal_cur_seq(j);
              seq++) {
@@ -196,6 +198,7 @@ void bch2_journal_do_writes(struct journal *j)
                 if (!journal_state_seq_count(j, j->reservations, seq)) {
                         j->seq_write_started = seq;
                         w->write_started = true;
+                        closure_get(&c->cl);
                         closure_call(&w->io, bch2_journal_write, j->wq, NULL);
                 }
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 2835250a14c4..47224666d07e 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1820,6 +1820,8 @@ static CLOSURE_CALLBACK(journal_write_done)
 
         if (do_discards)
                 bch2_do_discards(c);
+
+        closure_put(&c->cl);
 }
 
 static void journal_write_endio(struct bio *bio)
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 0fc0b2221036..b3b2d8353a36 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -729,6 +729,8 @@ void __bch2_fs_stop(struct bch_fs *c)
                 cancel_work_sync(&ca->io_error_work);
 
         cancel_work_sync(&c->read_only_work);
+
+        flush_work(&c->btree_interior_update_work);
 }
 
 void bch2_fs_free(struct bch_fs *c)
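[Editor's note: two of the later hunks pair up. journal.c takes a filesystem-wide closure reference (closure_get(&c->cl)) before each asynchronous journal write, and journal_io.c drops it (closure_put(&c->cl)) in journal_write_done(), so the filesystem cannot be torn down while a write is in flight. Separately, bch2_latency_acct() in io_write.c updates the device latency EWMA with a lockless try-cmpxchg loop. A minimal userspace sketch of that loop follows, using C11 atomics in place of the kernel's atomic64_t; the shift-based ewma_add() is illustrative rather than the kernel macro, and the weight of 5 mirrors the hunk's context.]

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Exponentially weighted moving average: each new sample contributes
 * roughly 1/2^weight of its difference from the current average.
 */
static uint64_t ewma_add(uint64_t ewma, uint64_t val, unsigned weight)
{
        return ewma - (ewma >> weight) + (val >> weight);
}

/*
 * Lockless update: recompute from the freshly observed value and retry
 * until the CAS lands, like the atomic64_try_cmpxchg() loop in
 * bch2_latency_acct().  On failure, the CAS reloads 'old' for us.
 */
static void latency_acct(_Atomic uint64_t *latency, uint64_t io_latency)
{
        uint64_t old = atomic_load_explicit(latency, memory_order_relaxed);
        uint64_t new;

        do {
                new = ewma_add(old, io_latency, 5);
        } while (!atomic_compare_exchange_weak(latency, &old, new));
}

int main(void)
{
        _Atomic uint64_t lat = 0;

        for (int i = 0; i < 64; i++)
                latency_acct(&lat, 1000);       /* feed 1ms samples */
        printf("ewma latency: %llu\n", (unsigned long long) atomic_load(&lat));
        return 0;
}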