author		Kent Overstreet <kent.overstreet@gmail.com>	2020-12-17 15:08:58 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2020-12-17 15:55:45 -0500
commit		6e007529f71ad21871f1d918b72378ed56af5cd4 (patch)
tree		7d60ffc4d58be57f8fcd5923e982af7d06a3b8fd /fs/bcachefs/btree_gc.c
parent		bc47a622bf2bb3bdb0e290f1cfe0ab5d3da75021 (diff)
bcachefs: Reduce/kill BKEY_PADDED use
With various newer key types - stripe keys, inline data extents - the
old approach of calculating the maximum size of the value is becoming
more and more error prone. Better to switch to bkey_on_stack, which can
dynamically allocate if necessary to handle any size bkey.
In particular we also want to get rid of BKEY_EXTENT_VAL_U64s_MAX.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
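The change in usage is visible in the hunks below. As a rough illustration only (the real BKEY_PADDED macro and struct bkey_buf are defined in the bcachefs headers, and use() here is just a placeholder for whatever consumes the key), the old and new patterns look roughly like this:

	/* Old pattern: a worst-case-sized buffer on the stack. */
	BKEY_PADDED(k) tmp;			/* struct bkey_i plus padding sized for
						 * the largest possible value */
	bkey_reassemble(&tmp.k, k);		/* copy the packed key into the buffer */
	use(&tmp.k);

	/* New pattern: a small buffer that can grow on demand. */
	struct bkey_buf buf;
	bch2_bkey_buf_init(&buf);		/* buf.k initially points at small
						 * inline storage */
	bch2_bkey_buf_reassemble(&buf, c, k);	/* allocates if the key doesn't fit */
	use(buf.k);
	bch2_bkey_buf_exit(&buf, c);		/* frees any heap allocation */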
Diffstat (limited to 'fs/bcachefs/btree_gc.c')
-rw-r--r--	fs/bcachefs/btree_gc.c	22
1 file changed, 12 insertions, 10 deletions
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 6268ea637d19..44eb8ff4b17d 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -8,7 +8,7 @@
 #include "alloc_background.h"
 #include "alloc_foreground.h"
 #include "bkey_methods.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_locking.h"
 #include "btree_update_interior.h"
 #include "btree_io.h"
@@ -268,10 +268,12 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
 	struct btree_and_journal_iter iter;
 	struct bkey_s_c k;
 	struct bpos next_node_start = b->data->min_key;
+	struct bkey_buf tmp;
 	u8 max_stale = 0;
 	int ret = 0;
 
 	bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b);
+	bch2_bkey_buf_init(&tmp);
 
 	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
 		bch2_bkey_debugcheck(c, b, k);
@@ -285,10 +287,9 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
 
 		if (b->c.level) {
 			struct btree *child;
-			BKEY_PADDED(k) tmp;
 
-			bkey_reassemble(&tmp.k, k);
-			k = bkey_i_to_s_c(&tmp.k);
+			bch2_bkey_buf_reassemble(&tmp, c, k);
+			k = bkey_i_to_s_c(tmp.k);
 
 			bch2_btree_and_journal_iter_advance(&iter);
 
@@ -300,7 +301,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
 				break;
 
 			if (b->c.level > target_depth) {
-				child = bch2_btree_node_get_noiter(c, &tmp.k,
+				child = bch2_btree_node_get_noiter(c, tmp.k,
 							b->c.btree_id, b->c.level - 1);
 				ret = PTR_ERR_OR_ZERO(child);
 				if (ret)
@@ -318,6 +319,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
 		}
 	}
 
+	bch2_bkey_buf_exit(&tmp, c);
 	return ret;
 }
 
@@ -930,10 +932,10 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
 	struct btree_trans trans;
 	struct btree_iter *iter;
 	struct bkey_s_c k;
-	struct bkey_on_stack sk;
+	struct bkey_buf sk;
 	int ret = 0;
 
-	bkey_on_stack_init(&sk);
+	bch2_bkey_buf_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);
 
 	iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
@@ -942,7 +944,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
 	while ((k = bch2_btree_iter_peek(iter)).k &&
 	       !(ret = bkey_err(k))) {
 		if (gc_btree_gens_key(c, k)) {
-			bkey_on_stack_reassemble(&sk, c, k);
+			bch2_bkey_buf_reassemble(&sk, c, k);
 			bch2_extent_normalize(c, bkey_i_to_s(sk.k));
 			bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
 
@@ -962,7 +964,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
 	}
 
 	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&sk, c);
+	bch2_bkey_buf_exit(&sk, c);
 
 	return ret;
 }
@@ -1074,7 +1076,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
 	}
 
 	if (bch2_keylist_realloc(&keylist, NULL, 0,
-			(BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
+			BKEY_BTREE_PTR_U64s_MAX * nr_old_nodes)) {
 		trace_btree_gc_coalesce_fail(c,
 				BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
 		return;
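The "dynamically allocate if necessary" behaviour the commit message describes can be pictured with a small stand-alone sketch. The key_buf type, its 128-byte inline array, and the function names below are invented for illustration; this is not the actual bkey_buf implementation, only the general inline-storage-with-heap-fallback idea.

	#include <stdlib.h>
	#include <string.h>

	/*
	 * Invented example type: a buffer with small inline storage that
	 * falls back to the heap only for keys larger than that storage.
	 */
	struct key_buf {
		void	*data;			/* points at inline_storage or a heap allocation */
		size_t	 capacity;
		char	 inline_storage[128];	/* size chosen arbitrarily for the example */
	};

	static void key_buf_init(struct key_buf *buf)
	{
		buf->data     = buf->inline_storage;
		buf->capacity = sizeof(buf->inline_storage);
	}

	static int key_buf_copy(struct key_buf *buf, const void *key, size_t bytes)
	{
		if (bytes > buf->capacity) {
			/* Grow only when a fixed worst-case buffer would not suffice. */
			void *p = malloc(bytes);
			if (!p)
				return -1;
			if (buf->data != buf->inline_storage)
				free(buf->data);
			buf->data     = p;
			buf->capacity = bytes;
		}
		memcpy(buf->data, key, bytes);
		return 0;
	}

	static void key_buf_exit(struct key_buf *buf)
	{
		if (buf->data != buf->inline_storage)
			free(buf->data);
	}

	int main(void)
	{
		char big_key[512] = "pretend this is a large bkey value";
		struct key_buf buf;

		key_buf_init(&buf);
		if (key_buf_copy(&buf, big_key, sizeof(big_key)))	/* forces a heap allocation */
			return 1;
		key_buf_exit(&buf);
		return 0;
	}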