Diffstat (limited to 'fs/bcachefs/btree_node_scan.c')
-rw-r--r--  fs/bcachefs/btree_node_scan.c  42
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index cc7af8fe689e..f4f958f4615d 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -65,16 +65,6 @@ static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_n
memcpy(bp->v.start, f->ptrs, sizeof(struct bch_extent_ptr) * f->nr_ptrs);
}
-static inline u64 bkey_journal_seq(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_inode_v3:
- return le64_to_cpu(bkey_s_c_to_inode_v3(k).v->bi_journal_seq);
- default:
- return 0;
- }
-}
-
static int found_btree_node_cmp_cookie(const void *_l, const void *_r)
{
const struct found_btree_node *l = _l;
@@ -196,25 +186,25 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
CLASS(printbuf, buf)();
if (!bch2_btree_node_read_done(c, ca, b, NULL, &buf)) {
+ /* read_done will swap out b->data for another buffer */
+ bn = b->data;
/*
* Grab journal_seq here because we want the max journal_seq of
* any bset; read_done sorts down to a single set and picks the
* max journal_seq
*/
- n.journal_seq = le64_to_cpu(b->data->keys.journal_seq),
+ n.journal_seq = le64_to_cpu(bn->keys.journal_seq),
n.sectors_written = b->written;
- mutex_lock(&f->lock);
+ guard(mutex)(&f->lock);
if (BSET_BIG_ENDIAN(&bn->keys) != CPU_BIG_ENDIAN) {
bch_err(c, "try_read_btree_node() can't handle endian conversion");
f->ret = -EINVAL;
- goto unlock;
+ return;
}
if (darray_push(&f->nodes, n))
f->ret = -ENOMEM;
-unlock:
- mutex_unlock(&f->lock);
}
}
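
The guard(mutex) conversion above relies on the scope-based cleanup helpers from <linux/cleanup.h>: the mutex is taken when the guard is declared and released automatically when the enclosing scope ends, which is what lets the early return replace the old "goto unlock" label. A minimal sketch of the idiom, using a hypothetical example type that is not part of this file:

        #include <linux/cleanup.h>
        #include <linux/mutex.h>

        struct counter {                        /* hypothetical example type */
                struct mutex    lock;
                int             value, max;
        };

        static int counter_inc(struct counter *c)
        {
                guard(mutex)(&c->lock);         /* locks now, unlocks at end of scope */

                if (c->value >= c->max)
                        return -ENOSPC;         /* early return, no unlock label needed */

                c->value++;
                return 0;
        }
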
@@ -224,15 +214,17 @@ static int read_btree_nodes_worker(void *p)
struct bch_fs *c = container_of(w->f, struct bch_fs, found_btree_nodes);
struct bch_dev *ca = w->ca;
unsigned long last_print = jiffies;
+ struct btree *b = NULL;
+ struct bio *bio = NULL;
- struct btree *b = __bch2_btree_node_mem_alloc(c);
+ b = __bch2_btree_node_mem_alloc(c);
if (!b) {
bch_err(c, "read_btree_nodes_worker: error allocating buf");
w->f->ret = -ENOMEM;
goto err;
}
- struct bio *bio = bio_alloc(NULL, buf_pages(b->data, c->opts.btree_node_size), 0, GFP_KERNEL);
+ bio = bio_alloc(NULL, buf_pages(b->data, c->opts.btree_node_size), 0, GFP_KERNEL);
if (!bio) {
bch_err(c, "read_btree_nodes_worker: error allocating bio");
w->f->ret = -ENOMEM;
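
In the read_btree_nodes_worker() hunk, b and bio are hoisted to the top of the function and NULL-initialized, presumably so the shared err: path can release whichever resources were actually allocated. A generic sketch of that pattern follows; the helpers, sizes, and function name are illustrative and not taken from this file:

        #include <linux/bio.h>
        #include <linux/slab.h>

        static int alloc_two_resources(void)
        {
                void *data = NULL;
                struct bio *bio = NULL;
                int ret = 0;

                data = kzalloc(4096, GFP_KERNEL);
                if (!data) {
                        ret = -ENOMEM;
                        goto err;
                }

                bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_KERNEL);
                if (!bio) {
                        ret = -ENOMEM;
                        goto err;
                }

                /* ... use data and bio ... */
        err:
                if (bio)
                        bio_put(bio);
                kfree(data);            /* kfree(NULL) is a no-op */
                return ret;
        }
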
@@ -278,6 +270,9 @@ static int read_btree_nodes(struct find_btree_nodes *f)
int ret = 0;
closure_init_stack(&cl);
+ CLASS(printbuf, buf)();
+
+ prt_printf(&buf, "scanning for btree nodes on");
for_each_online_member(c, ca, BCH_DEV_READ_REF_btree_node_scan) {
if (!(ca->mi.data_allowed & BIT(BCH_DATA_btree)))
@@ -303,10 +298,14 @@ static int read_btree_nodes(struct find_btree_nodes *f)
break;
}
+ prt_printf(&buf, " %s", ca->name);
+
closure_get(&cl);
enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan);
wake_up_process(t);
}
+
+ bch_notice(c, "%s", buf.buf);
err:
while (closure_sync_timeout(&cl, sysctl_hung_task_timeout_secs * HZ / 2))
;
@@ -367,7 +366,7 @@ static int handle_overwrites(struct bch_fs *c,
int bch2_scan_for_btree_nodes(struct bch_fs *c)
{
struct find_btree_nodes *f = &c->found_btree_nodes;
- struct printbuf buf = PRINTBUF;
+ CLASS(printbuf, buf)();
found_btree_nodes nodes_heap = {};
size_t dst;
int ret = 0;
@@ -474,7 +473,6 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
eytzinger0_sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
err:
darray_exit(&nodes_heap);
- printbuf_exit(&buf);
return ret;
}
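
The CLASS(printbuf, buf)() conversions in this and the following hunks use the same <linux/cleanup.h> machinery: the printbuf is initialized at declaration and printbuf_exit() runs automatically when the variable goes out of scope, which is why the explicit printbuf_exit() calls are dropped. A sketch of the assumed class definition and its use; the DEFINE_CLASS shape and log_example() are assumptions, not taken from this patch:

        #include <linux/cleanup.h>

        /* assumed shape of the printbuf class definition */
        DEFINE_CLASS(printbuf, struct printbuf,
                     printbuf_exit(&_T),        /* destructor, runs at end of scope */
                     PRINTBUF,                  /* constructor: starts as an empty printbuf */
                     void)

        static void log_example(struct bch_fs *c)
        {
                CLASS(printbuf, buf)();         /* buf = PRINTBUF */

                prt_printf(&buf, "hello %s", "world");
                bch_info(c, "%s", buf.buf);
        }                                       /* printbuf_exit(&buf) runs here */
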
@@ -546,7 +544,7 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
return ret;
if (c->opts.verbose) {
- struct printbuf buf = PRINTBUF;
+ CLASS(printbuf, buf)();
prt_str(&buf, "recovery ");
bch2_btree_id_level_to_text(&buf, btree, level);
@@ -556,7 +554,6 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
bch2_bpos_to_text(&buf, node_max);
bch_info(c, "%s(): %s", __func__, buf.buf);
- printbuf_exit(&buf);
}
struct found_btree_node search = {
@@ -580,10 +577,9 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
found_btree_node_to_key(&tmp.k, &n);
if (c->opts.verbose) {
- struct printbuf buf = PRINTBUF;
+ CLASS(printbuf, buf)();
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&tmp.k));
bch_verbose(c, "%s(): recovering %s", __func__, buf.buf);
- printbuf_exit(&buf);
}
BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k),