summaryrefslogtreecommitdiff
path: root/fs/bcachefs/move.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2020-12-17 15:08:58 -0500
committerKent Overstreet <kent.overstreet@gmail.com>2020-12-17 15:55:45 -0500
commit6e007529f71ad21871f1d918b72378ed56af5cd4 (patch)
tree7d60ffc4d58be57f8fcd5923e982af7d06a3b8fd /fs/bcachefs/move.c
parentbc47a622bf2bb3bdb0e290f1cfe0ab5d3da75021 (diff)
bcachefs: Reduce/kill BKEY_PADDED use
With various newer key types - stripe keys, inline data extents - the old approach of calculating the maximum size of the value is becoming more and more error prone. Better to switch to bkey_on_stack, which can dynamically allocate if necessary to handle any size bkey. In particular we also want to get rid of BKEY_EXTENT_VAL_U64s_MAX. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Diffstat (limited to 'fs/bcachefs/move.c')
-rw-r--r--fs/bcachefs/move.c26
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index cc1de422453f..fbc5e03fa745 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -2,7 +2,7 @@
#include "bcachefs.h"
#include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
@@ -61,8 +61,13 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
struct migrate_write *m =
container_of(op, struct migrate_write, op);
struct keylist *keys = &op->insert_keys;
+ struct bkey_buf _new, _insert;
int ret = 0;
+ bch2_bkey_buf_init(&_new);
+ bch2_bkey_buf_init(&_insert);
+ bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
+
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, m->btree_id,
@@ -73,7 +78,6 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
struct bkey_s_c k;
struct bkey_i *insert;
struct bkey_i_extent *new;
- BKEY_PADDED(k) _new, _insert;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bool did_work = false;
@@ -93,11 +97,11 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
!bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
goto nomatch;
- bkey_reassemble(&_insert.k, k);
- insert = &_insert.k;
+ bkey_reassemble(_insert.k, k);
+ insert = _insert.k;
- bkey_copy(&_new.k, bch2_keylist_front(keys));
- new = bkey_i_to_extent(&_new.k);
+ bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
+ new = bkey_i_to_extent(_new.k);
bch2_cut_front(iter->pos, &new->k_i);
bch2_cut_front(iter->pos, insert);
@@ -194,6 +198,8 @@ nomatch:
}
out:
bch2_trans_exit(&trans);
+ bch2_bkey_buf_exit(&_insert, c);
+ bch2_bkey_buf_exit(&_new, c);
BUG_ON(ret == -EINTR);
return ret;
}
@@ -513,7 +519,7 @@ static int __bch2_move_data(struct bch_fs *c,
{
bool kthread = (current->flags & PF_KTHREAD) != 0;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
- struct bkey_on_stack sk;
+ struct bkey_buf sk;
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
@@ -522,7 +528,7 @@ static int __bch2_move_data(struct bch_fs *c,
u64 delay, cur_inum = U64_MAX;
int ret = 0, ret2;
- bkey_on_stack_init(&sk);
+ bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
stats->data_type = BCH_DATA_user;
@@ -602,7 +608,7 @@ peek:
}
/* unlock before doing IO: */
- bkey_on_stack_reassemble(&sk, c, k);
+ bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
bch2_trans_unlock(&trans);
@@ -630,7 +636,7 @@ next_nondata:
}
out:
ret = bch2_trans_exit(&trans) ?: ret;
- bkey_on_stack_exit(&sk, c);
+ bch2_bkey_buf_exit(&sk, c);
return ret;
}