path: root/libbcachefs/btree_gc.c
Diffstat (limited to 'libbcachefs/btree_gc.c')
-rw-r--r--	libbcachefs/btree_gc.c	426
1 file changed, 118 insertions(+), 308 deletions(-)
diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c
index ecbd9598..e1e899d5 100644
--- a/libbcachefs/btree_gc.c
+++ b/libbcachefs/btree_gc.c
@@ -52,12 +52,6 @@ static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
}}};
}
-static bool should_restart_for_topology_repair(struct bch_fs *c)
-{
- return c->opts.fix_errors != FSCK_FIX_no &&
- !(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
-}
-
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
preempt_disable();
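
Note: should_restart_for_topology_repair() is gone. Instead of deciding up front whether a restart would be worthwhile, an unreadable node now escalates straight to the topology pass at the point the read error is observed, in bch2_gc_btrees() further down. The replacement pattern, quoted from that hunk:

	if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
				c, btree_node_read_error,
				"btree node read error for %s",
				bch2_btree_id_str(btree)))
		ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
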
@@ -546,9 +540,9 @@ reconstruct_root:
if (!bch2_btree_has_scanned_nodes(c, i)) {
mustfix_fsck_err(c, btree_root_unreadable_and_scan_found_nothing,
"no nodes found for btree %s, continue?", bch2_btree_id_str(i));
- bch2_btree_root_alloc_fake(c, i, 0);
+ bch2_btree_root_alloc_fake_trans(trans, i, 0);
} else {
- bch2_btree_root_alloc_fake(c, i, 1);
+ bch2_btree_root_alloc_fake_trans(trans, i, 1);
bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
if (ret)
@@ -576,7 +570,7 @@ reconstruct_root:
goto reconstruct_root;
bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
- bch2_btree_root_alloc_fake(c, i, 0);
+ bch2_btree_root_alloc_fake_trans(trans, i, 0);
r->alive = false;
ret = 0;
}
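
Note: root repair now allocates fake roots through the transaction. Judging purely by the call sites in these hunks, only the first argument changes; the assumed signature of the new helper:

	/* assumed from the call sites above; the _trans variant lets
	 * fake-root allocation run inside the caller's btree_trans: */
	void bch2_btree_root_alloc_fake_trans(struct btree_trans *trans,
					      enum btree_id id, unsigned level);
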
@@ -603,7 +597,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
* use check_bucket_ref here
*/
bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, p, entry_c);
@@ -736,7 +730,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
*/
struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_GC_BUCKET(ca, ptr);
ptr->gen = g->gen;
@@ -747,7 +741,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
restart_drop_ptrs:
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);
@@ -862,7 +856,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
ret = commit_do(trans, NULL, NULL, 0,
bch2_key_trigger(trans, btree_id, level, old,
- unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
+ unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_gc));
fsck_err:
err:
printbuf_exit(&buf);
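
Note: two mechanical renames run through the rest of this diff: bch_dev_bkey_exists() gains the bch2_ prefix used by the rest of the namespace, and the BTREE_TRIGGER_*/BTREE_ITER_* flags move from uppercase to lowercase names (presumably typed enums now rather than bare defines). Before/after for the trigger call above:

	/* old */ BTREE_TRIGGER_GC
	/* new */ BTREE_TRIGGER_gc	/* likewise _norun, _prefetch, _slots,
					 * _all_snapshots further down */
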
@@ -872,8 +866,7 @@ err:
static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
{
- struct btree_node_iter iter;
- struct bkey unpacked;
+ struct btree_and_journal_iter iter;
struct bkey_s_c k;
int ret = 0;
@@ -881,36 +874,33 @@ static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool i
if (ret)
return ret;
- if (!btree_node_type_needs_gc(btree_node_type(b)))
- return 0;
-
- bch2_btree_node_iter_init_from_start(&iter, b);
+ bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
- ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
- &k, initial);
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false, &k, initial);
if (ret)
- return ret;
+ break;
- bch2_btree_node_iter_advance(&iter, b);
+ bch2_btree_and_journal_iter_advance(&iter);
}
- return 0;
+ bch2_btree_and_journal_iter_exit(&iter);
+ return ret;
}
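
Note: btree_gc_mark_node() switches from the plain node iterator to the journal-aware one, so marking also sees keys destined for this node that are still sitting in the journal. Two consequences visible above: the early return for !btree_node_type_needs_gc() is gone (every alive btree gets walked now), and since the journal iterator sets up state that must be torn down, error paths break out of the loop and fall through to the _exit() call instead of returning directly:

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false, &k, initial);
		if (ret)
			break;		/* not "return ret": iter needs _exit() */
		bch2_btree_and_journal_iter_advance(&iter);
	}
	bch2_btree_and_journal_iter_exit(&iter);
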
static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
- bool initial, bool metadata_only)
+ bool initial)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct btree *b;
- unsigned depth = metadata_only ? 1 : 0;
+ unsigned target_depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
int ret = 0;
gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
__for_each_btree_node(trans, iter, btree_id, POS_MIN,
- 0, depth, BTREE_ITER_PREFETCH, b, ret) {
+ 0, target_depth, BTREE_ITER_prefetch, b, ret) {
bch2_verify_btree_nr_keys(b);
gc_pos_set(c, gc_pos_btree_node(b));
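
Note: the metadata_only knob disappears from the runtime walk; the descent depth is now a property of the btree itself. My reading of the new expression: btrees whose leaf keys can carry pointers (e.g. extents, reflink) are walked all the way down, everything else stops at depth 1, since interior nodes always reference child btree nodes that need marking regardless of key type:

	unsigned target_depth = btree_type_has_ptrs(btree_id)
		? 0	/* leaf keys can reference buckets: walk them */
		: 1;	/* only interior nodes reference anything */
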
@@ -941,126 +931,61 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
unsigned target_depth)
{
- struct bch_fs *c = trans->c;
- struct btree_and_journal_iter iter;
- struct bkey_s_c k;
- struct bkey_buf cur;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- ret = bch2_btree_node_check_topology(trans, b);
+ int ret = btree_gc_mark_node(trans, b, true);
if (ret)
return ret;
- bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- bch2_bkey_buf_init(&cur);
-
- while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- BUG_ON(bpos_lt(k.k->p, b->data->min_key));
- BUG_ON(bpos_gt(k.k->p, b->data->max_key));
-
- ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
- false, &k, true);
- if (ret)
- goto fsck_err;
-
- bch2_btree_and_journal_iter_advance(&iter);
- }
-
if (b->c.level > target_depth) {
- bch2_btree_and_journal_iter_exit(&iter);
+ struct bch_fs *c = trans->c;
+ struct btree_and_journal_iter iter;
+ struct bkey_s_c k;
+ struct bkey_buf cur;
+
+ bch2_bkey_buf_init(&cur);
bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
iter.prefetch = true;
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- struct btree *child;
-
bch2_bkey_buf_reassemble(&cur, c, k);
bch2_btree_and_journal_iter_advance(&iter);
- child = bch2_btree_node_get_noiter(trans, cur.k,
+ struct btree *child =
+ bch2_btree_node_get_noiter(trans, cur.k,
b->c.btree_id, b->c.level - 1,
false);
ret = PTR_ERR_OR_ZERO(child);
-
- if (bch2_err_matches(ret, EIO)) {
- bch2_topology_error(c);
-
- if (__fsck_err(c,
- FSCK_CAN_FIX|
- FSCK_CAN_IGNORE|
- FSCK_NO_RATELIMIT,
- btree_node_read_error,
- "Unreadable btree node at btree %s level %u:\n"
- " %s",
- bch2_btree_id_str(b->c.btree_id),
- b->c.level - 1,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
- should_restart_for_topology_repair(c)) {
- bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
- goto fsck_err;
- } else {
- /* Continue marking when opted to not
- * fix the error: */
- ret = 0;
- set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
- continue;
- }
- } else if (ret) {
- bch_err_msg(c, ret, "getting btree node");
+ bch_err_msg(c, ret, "getting btree node");
+ if (ret)
break;
- }
- ret = bch2_gc_btree_init_recurse(trans, child,
- target_depth);
+ ret = bch2_gc_btree_init_recurse(trans, child, target_depth);
six_unlock_read(&child->c.lock);
if (ret)
break;
}
+
+ bch2_bkey_buf_exit(&cur, c);
+ bch2_btree_and_journal_iter_exit(&iter);
}
-fsck_err:
- bch2_bkey_buf_exit(&cur, c);
- bch2_btree_and_journal_iter_exit(&iter);
- printbuf_exit(&buf);
+
return ret;
}
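
Note: the recursion is restructured around the shared btree_gc_mark_node() above: this node's keys are marked there first, and only interior nodes re-walk their keys to descend. The elaborate unreadable-child fsck prompt (and its printbuf) is gone; a failed child read simply propagates, and escalation to the topology pass happens once, in bch2_gc_btrees(). One subtlety: bch_err_msg() is now called before the if (ret) check, which works because, if I am reading the error helpers right, it only prints when ret is set:

	ret = PTR_ERR_OR_ZERO(child);
	bch_err_msg(c, ret, "getting btree node");	/* no-op when ret == 0 */
	if (ret)
		break;
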
static int bch2_gc_btree_init(struct btree_trans *trans,
- enum btree_id btree_id,
- bool metadata_only)
+ enum btree_id btree_id)
{
struct bch_fs *c = trans->c;
- struct btree *b;
- unsigned target_depth = metadata_only ? 1 : 0;
- struct printbuf buf = PRINTBUF;
+ /*
+ * We need to make sure every leaf node is readable before going RW
+ unsigned target_depth = btree_node_type_needs_gc(__btree_node_type(0, btree_id)) ? 0 : 1;
+ */
+ unsigned target_depth = 0;
int ret = 0;
- b = bch2_btree_id_root(c, btree_id)->b;
+ struct btree *b = bch2_btree_id_root(c, btree_id)->b;
six_lock_read(&b->c.lock, NULL, NULL);
- printbuf_reset(&buf);
- bch2_bpos_to_text(&buf, b->data->min_key);
- if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
- btree_root_bad_min_key,
- "btree root with incorrect min_key: %s", buf.buf)) {
- bch_err(c, "repair unimplemented");
- ret = -BCH_ERR_fsck_repair_unimplemented;
- goto fsck_err;
- }
-
- printbuf_reset(&buf);
- bch2_bpos_to_text(&buf, b->data->max_key);
- if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
- btree_root_bad_max_key,
- "btree root with incorrect max_key: %s", buf.buf)) {
- bch_err(c, "repair unimplemented");
- ret = -BCH_ERR_fsck_repair_unimplemented;
- goto fsck_err;
- }
-
if (b->c.level >= target_depth)
ret = bch2_gc_btree_init_recurse(trans, b, target_depth);
@@ -1070,11 +995,9 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, true,
&k, true);
}
-fsck_err:
six_unlock_read(&b->c.lock);
bch_err_fn(c, ret);
- printbuf_exit(&buf);
return ret;
}
@@ -1084,7 +1007,7 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
(int) btree_id_to_gc_phase(r);
}
-static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
+static int bch2_gc_btrees(struct bch_fs *c, bool initial)
{
struct btree_trans *trans = bch2_trans_get(c);
enum btree_id ids[BTREE_ID_NR];
@@ -1095,98 +1018,38 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
ids[i] = i;
bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
- for (i = 0; i < BTREE_ID_NR && !ret; i++)
- ret = initial
- ? bch2_gc_btree_init(trans, ids[i], metadata_only)
- : bch2_gc_btree(trans, ids[i], initial, metadata_only);
+ for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+ unsigned btree = i < BTREE_ID_NR ? ids[i] : i;
- for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) {
- if (!bch2_btree_id_root(c, i)->alive)
+ if (IS_ERR_OR_NULL(bch2_btree_id_root(c, btree)->b))
continue;
ret = initial
- ? bch2_gc_btree_init(trans, i, metadata_only)
- : bch2_gc_btree(trans, i, initial, metadata_only);
+ ? bch2_gc_btree_init(trans, btree)
+ : bch2_gc_btree(trans, btree, initial);
+
+ if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
+ c, btree_node_read_error,
+ "btree node read error for %s",
+ bch2_btree_id_str(btree)))
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
}
-
+fsck_err:
bch2_trans_put(trans);
bch_err_fn(c, ret);
return ret;
}
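
Note: the two loops over btree roots collapse into one. Indices below BTREE_ID_NR go through the bubble-sorted ids[] so the built-in btrees are still visited in GC-phase order; dynamically discovered roots beyond that are taken in index order. The alive check becomes an IS_ERR_OR_NULL check on the root node itself, and a read error here is where the topology pass now gets scheduled (see the note at the top of this diff):

	/* first BTREE_ID_NR entries are phase-sorted, the rest positional: */
	unsigned btree = i < BTREE_ID_NR ? ids[i] : i;
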
-static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
- u64 start, u64 end,
- enum bch_data_type type,
- unsigned flags)
-{
- u64 b = sector_to_bucket(ca, start);
-
- do {
- unsigned sectors =
- min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
-
- bch2_mark_metadata_bucket(c, ca, b, type, sectors,
- gc_phase(GC_PHASE_SB), flags);
- b++;
- start += sectors;
- } while (start < end);
-}
-
-static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
- unsigned flags)
-{
- struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
- unsigned i;
- u64 b;
-
- for (i = 0; i < layout->nr_superblocks; i++) {
- u64 offset = le64_to_cpu(layout->sb_offset[i]);
-
- if (offset == BCH_SB_SECTOR)
- mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
- BCH_DATA_sb, flags);
-
- mark_metadata_sectors(c, ca, offset,
- offset + (1 << layout->sb_max_size_bits),
- BCH_DATA_sb, flags);
- }
-
- for (i = 0; i < ca->journal.nr; i++) {
- b = ca->journal.buckets[i];
- bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
- ca->mi.bucket_size,
- gc_phase(GC_PHASE_SB), flags);
- }
-}
-
-static void bch2_mark_superblocks(struct bch_fs *c)
+static int bch2_mark_superblocks(struct bch_fs *c)
{
mutex_lock(&c->sb_lock);
gc_pos_set(c, gc_phase(GC_PHASE_SB));
- for_each_online_member(c, ca)
- bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
+ int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
mutex_unlock(&c->sb_lock);
+ return ret;
}
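
Note: the hand-rolled superblock and journal-bucket marking deleted above (mark_metadata_sectors(), bch2_mark_dev_superblock()) is replaced by one call into the transactional marking path, with BTREE_TRIGGER_gc selecting the GC copy of the counters. At the GC top level, quoted from the bch2_gc() hunk further down, failure is currently treated as a bug rather than an error:

	ret = bch2_mark_superblocks(c);
	BUG_ON(ret);
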
-#if 0
-/* Also see bch2_pending_btree_node_free_insert_done() */
-static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
-{
- struct btree_update *as;
- struct pending_btree_node_free *d;
-
- mutex_lock(&c->btree_interior_update_lock);
- gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
-
- for_each_pending_btree_node_free(c, as, d)
- if (d->index_update_done)
- bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);
-
- mutex_unlock(&c->btree_interior_update_lock);
-}
-#endif
-
static void bch2_gc_free(struct bch_fs *c)
{
genradix_free(&c->reflink_gc_table);
@@ -1204,28 +1067,23 @@ static void bch2_gc_free(struct bch_fs *c)
c->usage_gc = NULL;
}
-static int bch2_gc_done(struct bch_fs *c,
- bool initial, bool metadata_only)
+static int bch2_gc_done(struct bch_fs *c)
{
struct bch_dev *ca = NULL;
struct printbuf buf = PRINTBUF;
- bool verify = !metadata_only &&
- !c->opts.reconstruct_alloc &&
- (!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
unsigned i;
int ret = 0;
percpu_down_write(&c->mark_lock);
-#define copy_field(_err, _f, _msg, ...) \
- if (dst->_f != src->_f && \
- (!verify || \
- fsck_err(c, _err, _msg ": got %llu, should be %llu" \
- , ##__VA_ARGS__, dst->_f, src->_f))) \
+#define copy_field(_err, _f, _msg, ...) \
+ if (fsck_err_on(dst->_f != src->_f, c, _err, \
+ _msg ": got %llu, should be %llu" , ##__VA_ARGS__, \
+ dst->_f, src->_f)) \
dst->_f = src->_f
-#define copy_dev_field(_err, _f, _msg, ...) \
+#define copy_dev_field(_err, _f, _msg, ...) \
copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
-#define copy_fs_field(_err, _f, _msg, ...) \
+#define copy_fs_field(_err, _f, _msg, ...) \
copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
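
Note: copy_field() loses its verify gate: with metadata_only gone, any mismatch between the in-memory counters (dst) and the GC-recomputed ones (src) is always surfaced through fsck_err_on() and repaired by copying. For concreteness, my own rough expansion of one use (not from the source):

	/* copy_fs_field(fs_usage_btree_wrong, b.btree, "btree");
	 * expands to approximately: */
	if (fsck_err_on(dst->b.btree != src->b.btree,
			c, fs_usage_btree_wrong,
			"fs has wrong btree: got %llu, should be %llu",
			dst->b.btree, src->b.btree))
		dst->b.btree = src->b.btree;
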
@@ -1258,31 +1116,24 @@ static int bch2_gc_done(struct bch_fs *c,
copy_fs_field(fs_usage_btree_wrong,
b.btree, "btree");
- if (!metadata_only) {
- copy_fs_field(fs_usage_data_wrong,
- b.data, "data");
- copy_fs_field(fs_usage_cached_wrong,
- b.cached, "cached");
- copy_fs_field(fs_usage_reserved_wrong,
- b.reserved, "reserved");
- copy_fs_field(fs_usage_nr_inodes_wrong,
- b.nr_inodes,"nr_inodes");
-
- for (i = 0; i < BCH_REPLICAS_MAX; i++)
- copy_fs_field(fs_usage_persistent_reserved_wrong,
- persistent_reserved[i],
- "persistent_reserved[%i]", i);
- }
+ copy_fs_field(fs_usage_data_wrong,
+ b.data, "data");
+ copy_fs_field(fs_usage_cached_wrong,
+ b.cached, "cached");
+ copy_fs_field(fs_usage_reserved_wrong,
+ b.reserved, "reserved");
+ copy_fs_field(fs_usage_nr_inodes_wrong,
+ b.nr_inodes,"nr_inodes");
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ copy_fs_field(fs_usage_persistent_reserved_wrong,
+ persistent_reserved[i],
+ "persistent_reserved[%i]", i);
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry_v1 *e =
cpu_replicas_entry(&c->replicas, i);
- if (metadata_only &&
- (e->data_type == BCH_DATA_user ||
- e->data_type == BCH_DATA_cached))
- continue;
-
printbuf_reset(&buf);
bch2_replicas_entry_to_text(&buf, e);
@@ -1361,11 +1212,10 @@ static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
static int bch2_alloc_write_key(struct btree_trans *trans,
struct btree_iter *iter,
- struct bkey_s_c k,
- bool metadata_only)
+ struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
+ struct bch_dev *ca = bch2_dev_bkey_exists(c, iter->pos.inode);
struct bucket old_gc, gc, *b;
struct bkey_i_alloc_v4 *a;
struct bch_alloc_v4 old_convert, new;
@@ -1402,12 +1252,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
bch2_dev_usage_update_m(c, ca, &old_gc, &gc);
percpu_up_read(&c->mark_lock);
- if (metadata_only &&
- gc.data_type != BCH_DATA_sb &&
- gc.data_type != BCH_DATA_journal &&
- gc.data_type != BCH_DATA_btree)
- return 0;
-
if (gen_after(old->gen, gc.gen))
return 0;
@@ -1460,12 +1304,12 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
+ ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
fsck_err:
return ret;
}
-static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_alloc_done(struct bch_fs *c)
{
int ret = 0;
@@ -1474,9 +1318,9 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
POS(ca->dev_idx, ca->mi.first_bucket),
POS(ca->dev_idx, ca->mi.nbuckets - 1),
- BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_slots|BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
- bch2_alloc_write_key(trans, &iter, k, metadata_only)));
+ bch2_alloc_write_key(trans, &iter, k)));
if (ret) {
percpu_ref_put(&ca->ref);
break;
@@ -1487,7 +1331,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
return ret;
}
-static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_alloc_start(struct bch_fs *c)
{
for_each_member_device(c, ca) {
struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
@@ -1506,8 +1350,8 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_PREFETCH, k, ({
- struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ BTREE_ITER_prefetch, k, ({
+ struct bch_dev *ca = bch2_dev_bkey_exists(c, k.k->p.inode);
struct bucket *g = gc_bucket(ca, k.k->p.offset);
struct bch_alloc_v4 a_convert;
@@ -1515,36 +1359,19 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
g->gen_valid = 1;
g->gen = a->gen;
-
- if (metadata_only &&
- (a->data_type == BCH_DATA_user ||
- a->data_type == BCH_DATA_cached ||
- a->data_type == BCH_DATA_parity)) {
- g->data_type = a->data_type;
- g->dirty_sectors = a->dirty_sectors;
- g->cached_sectors = a->cached_sectors;
- g->stripe = a->stripe;
- g->stripe_redundancy = a->stripe_redundancy;
- }
-
0;
})));
bch_err_fn(c, ret);
return ret;
}
-static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
+static void bch2_gc_alloc_reset(struct bch_fs *c)
{
for_each_member_device(c, ca) {
struct bucket_array *buckets = gc_bucket_array(ca);
struct bucket *g;
for_each_bucket(g, buckets) {
- if (metadata_only &&
- (g->data_type == BCH_DATA_user ||
- g->data_type == BCH_DATA_cached ||
- g->data_type == BCH_DATA_parity))
- continue;
g->data_type = 0;
g->dirty_sectors = 0;
g->cached_sectors = 0;
@@ -1600,35 +1427,27 @@ fsck_err:
return ret;
}
-static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_reflink_done(struct bch_fs *c)
{
size_t idx = 0;
- if (metadata_only)
- return 0;
-
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
c->reflink_gc_nr = 0;
return ret;
}
-static int bch2_gc_reflink_start(struct bch_fs *c,
- bool metadata_only)
+static int bch2_gc_reflink_start(struct bch_fs *c)
{
-
- if (metadata_only)
- return 0;
-
c->reflink_gc_nr = 0;
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_PREFETCH, k, ({
+ BTREE_ITER_prefetch, k, ({
const __le64 *refcount = bkey_refcount_c(k);
if (!refcount)
@@ -1651,7 +1470,7 @@ static int bch2_gc_reflink_start(struct bch_fs *c,
return ret;
}
-static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
+static void bch2_gc_reflink_reset(struct bch_fs *c)
{
struct genradix_iter iter;
struct reflink_gc *r;
@@ -1713,20 +1532,17 @@ fsck_err:
return ret;
}
-static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_stripes_done(struct bch_fs *c)
{
- if (metadata_only)
- return 0;
-
return bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_gc_write_stripes_key(trans, &iter, k)));
}
-static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
+static void bch2_gc_stripes_reset(struct bch_fs *c)
{
genradix_free(&c->gc_stripes);
}
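
Note: with this hunk the metadata-only GC mode is gone from every helper: _start, _done, and _reset for alloc, reflink, and stripes all drop the parameter, and the removed pattern is the same each time:

	/* removed from each helper: */
	if (metadata_only)
		return 0;

Since the #if 0 in bch2_gc_thread() at the bottom of this diff shows runtime full GC is disabled anyway, the initial recovery-time run is the only caller left, and it always wants everything checked.
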
@@ -1736,7 +1552,6 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
*
* @c: filesystem object
* @initial: are we in recovery?
- * @metadata_only: are we just checking metadata references, or everything?
*
* Returns: 0 on success, or standard errcode on failure
*
@@ -1755,7 +1570,7 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
* move around - if references move backwards in the ordering GC
* uses, GC could skip past them
*/
-int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
+static int bch2_gc(struct bch_fs *c, bool initial)
{
unsigned iter = 0;
int ret;
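
Note: the comment above is the key invariant behind gc_pos: marks are applied in a fixed order, and concurrent foreground updates compare their position against c->gc_pos to decide whether the mark-and-sweep has already visited the thing being changed, in which case the change must be mirrored into the GC copy of the state. A hedged sketch of the idea, not the literal helper:

	/* an update at position pos must also update GC state iff
	 * the mark-and-sweep already passed that position: */
	bool must_update_gc = gc_pos_cmp(pos, c->gc_pos) <= 0;

If a reference moves backwards in that ordering mid-run, GC may have skipped it; that is what BCH_FS_need_another_gc and the again: label below guard against.
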
@@ -1767,23 +1582,20 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
bch2_btree_interior_updates_flush(c);
ret = bch2_gc_start(c) ?:
- bch2_gc_alloc_start(c, metadata_only) ?:
- bch2_gc_reflink_start(c, metadata_only);
+ bch2_gc_alloc_start(c) ?:
+ bch2_gc_reflink_start(c);
if (ret)
goto out;
again:
gc_pos_set(c, gc_phase(GC_PHASE_START));
- bch2_mark_superblocks(c);
-
- ret = bch2_gc_btrees(c, initial, metadata_only);
+ ret = bch2_mark_superblocks(c);
+ BUG_ON(ret);
+ ret = bch2_gc_btrees(c, initial);
if (ret)
goto out;
-#if 0
- bch2_mark_pending_btree_node_frees(c);
-#endif
c->gc_count++;
if (test_bit(BCH_FS_need_another_gc, &c->flags) ||
@@ -1801,9 +1613,9 @@ again:
clear_bit(BCH_FS_need_another_gc, &c->flags);
__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
- bch2_gc_stripes_reset(c, metadata_only);
- bch2_gc_alloc_reset(c, metadata_only);
- bch2_gc_reflink_reset(c, metadata_only);
+ bch2_gc_stripes_reset(c);
+ bch2_gc_alloc_reset(c);
+ bch2_gc_reflink_reset(c);
ret = bch2_gc_reset(c);
if (ret)
goto out;
@@ -1816,10 +1628,10 @@ out:
if (!ret) {
bch2_journal_block(&c->journal);
- ret = bch2_gc_alloc_done(c, metadata_only) ?:
- bch2_gc_done(c, initial, metadata_only) ?:
- bch2_gc_stripes_done(c, metadata_only) ?:
- bch2_gc_reflink_done(c, metadata_only);
+ ret = bch2_gc_alloc_done(c) ?:
+ bch2_gc_done(c) ?:
+ bch2_gc_stripes_done(c) ?:
+ bch2_gc_reflink_done(c);
bch2_journal_unblock(&c->journal);
}
@@ -1842,6 +1654,11 @@ out:
return ret;
}
+int bch2_check_allocations(struct bch_fs *c)
+{
+ return bch2_gc(c, true);
+}
+
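
Note: bch2_gc() becomes static, and bch2_check_allocations() is the exported entry point, matching the recovery-pass naming convention (presumably this is what the check_allocations recovery pass now calls, with initial = true since it only runs during recovery). The assumed matching header change:

	/* assumed declaration in btree_gc.h, replacing the bch2_gc() export: */
	int bch2_check_allocations(struct bch_fs *);
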
static int gc_btree_gens_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
@@ -1853,7 +1670,7 @@ static int gc_btree_gens_key(struct btree_trans *trans,
percpu_down_read(&c->mark_lock);
bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
if (ptr_stale(ca, ptr) > 16) {
percpu_up_read(&c->mark_lock);
@@ -1862,7 +1679,7 @@ static int gc_btree_gens_key(struct btree_trans *trans,
}
bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
if (gen_after(*gen, ptr->gen))
@@ -1883,7 +1700,7 @@ update:
static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k)
{
- struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
+ struct bch_dev *ca = bch2_dev_bkey_exists(trans->c, iter->pos.inode);
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
struct bkey_i_alloc_v4 *a_mut;
@@ -1944,7 +1761,7 @@ int bch2_gc_gens(struct bch_fs *c)
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, i,
POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@@ -1956,7 +1773,7 @@ int bch2_gc_gens(struct bch_fs *c)
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
POS_MIN,
- BTREE_ITER_PREFETCH,
+ BTREE_ITER_prefetch,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@@ -2023,14 +1840,7 @@ static int bch2_gc_thread(void *arg)
last = atomic64_read(&clock->now);
last_kick = atomic_read(&c->kick_gc);
- /*
- * Full gc is currently incompatible with btree key cache:
- */
-#if 0
- ret = bch2_gc(c, false, false);
-#else
bch2_gc_gens(c);
-#endif
debug_check_no_locks_held();
}