author     Kent Overstreet <kent.overstreet@gmail.com>    2022-03-17 18:21:15 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>    2022-04-02 16:02:57 -0400
commit     b9cd9496a25a2ce23d7f9227a1690539fb32f08d
tree       1bdd33aca69347280bc519620b47067f510c4700
parent     f20252587576285799eb4777f6d5373055e496fb
bcachefs: Kill bch2_alloc_write()
This patch introduces bch2_alloc_to_v4_mut(), which returns a bkey_i_alloc_v4 * that can then be passed directly to bch2_trans_update().

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
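The flow this patch converts callers to is easiest to see in bch2_bucket_io_time_reset() in the diff below: obtain a mutable bkey_i_alloc_v4 with bch2_trans_start_alloc_update(), edit its value in place, then queue the whole key with bch2_trans_update(). The following is a minimal sketch of that pattern; the helper name example_touch_bucket, the field it modifies, and the exact include list are illustrative assumptions, while the bch2_* calls, types, and error conventions are the ones shown in this diff.

/*
 * Sketch of the new alloc-update pattern introduced by this patch.
 * The helper name and the field being modified are illustrative only;
 * the bch2_* calls and types are those visible in the diff below.
 */
#include "bcachefs.h"
#include "alloc_background.h"
#include "btree_update.h"

static int example_touch_bucket(struct btree_trans *trans, unsigned dev, u64 bucket)
{
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	int ret;

	/* Look up the alloc key for (dev, bucket) and get a mutable v4 copy */
	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;	/* on error the helper has already exited the iter */

	/* Edit the value in place... */
	a->v.io_time[READ] = atomic64_read(&trans->c->io_clock[READ].now);

	/* ...then hand the whole key to the transaction as an update */
	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL, 0);

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

Compared to the old bch2_alloc_write(), which rebuilt a fresh key from a struct bch_alloc_v4, the mutable key preserves whatever a KEY_TYPE_alloc_v4 value already carries: bch2_alloc_to_v4_mut() reassembles the existing key rather than reinitializing it, and only falls back to bkey_alloc_v4_init() plus bch2_alloc_to_v4() for older key types.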
-rw-r--r--  fs/bcachefs/alloc_background.c | 149
-rw-r--r--  fs/bcachefs/alloc_background.h |   5
-rw-r--r--  fs/bcachefs/btree_gc.c         |  20
-rw-r--r--  fs/bcachefs/buckets.c          | 117
-rw-r--r--  fs/bcachefs/buckets.h          |   8
5 files changed, 163 insertions(+), 136 deletions(-)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index eb62b4fc2367..1188239a1bcc 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -247,21 +247,48 @@ void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
}
}
-int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
- struct bch_alloc_v4 *src, unsigned trigger_flags)
+struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
- struct bkey_i_alloc_v4 *dst =
- bch2_trans_kmalloc(trans, sizeof(*dst));
+ struct bkey_i_alloc_v4 *ret;
- if (IS_ERR(dst))
- return PTR_ERR(dst);
+ if (k.k->type == KEY_TYPE_alloc_v4) {
+ ret = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ if (!IS_ERR(ret))
+ bkey_reassemble(&ret->k_i, k);
+ } else {
+ ret = bch2_trans_kmalloc(trans, sizeof(*ret));
+ if (!IS_ERR(ret)) {
+ bkey_alloc_v4_init(&ret->k_i);
+ ret->k.p = k.k->p;
+ bch2_alloc_to_v4(k, &ret->v);
+ }
+ }
+ return ret;
+}
- bkey_alloc_v4_init(&dst->k_i);
- set_bkey_val_bytes(&dst->k, sizeof(dst->v));
- dst->k.p = iter->pos;
- dst->v = *src;
+struct bkey_i_alloc_v4 *
+bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos pos)
+{
+ struct bkey_s_c k;
+ struct bkey_i_alloc_v4 *a;
+ int ret;
- return bch2_trans_update(trans, iter, &dst->k_i, trigger_flags);
+ bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
+ BTREE_ITER_WITH_UPDATES|
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret) {
+ bch2_trans_iter_exit(trans, iter);
+ return ERR_PTR(ret);
+ }
+
+ a = bch2_alloc_to_v4_mut(trans, k);
+ if (IS_ERR(a))
+ bch2_trans_iter_exit(trans, iter);
+ return a;
}
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
@@ -649,11 +676,25 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
ret = bch2_lru_change(trans,
alloc_k.k->p.inode,
alloc_k.k->p.offset,
- 0, &a.io_time[READ]) ?:
- (a.io_time[READ] != read_time
- ? bch2_alloc_write(trans, alloc_iter, &a, BTREE_TRIGGER_NORUN)
- : 0) ?:
- bch2_trans_commit(trans, NULL, NULL, 0);
+ 0, &a.io_time[READ]);
+ if (ret)
+ goto err;
+
+ if (a.io_time[READ] != read_time) {
+ struct bkey_i_alloc_v4 *a_mut =
+ bch2_alloc_to_v4_mut(trans, alloc_k);
+ ret = PTR_ERR_OR_ZERO(a_mut);
+ if (ret)
+ goto err;
+
+ a_mut->v.io_time[READ] = a.io_time[READ];
+ ret = bch2_trans_update(trans, alloc_iter,
+ &a_mut->k_i, BTREE_TRIGGER_NORUN);
+ if (ret)
+ goto err;
+ }
+
+ ret = bch2_trans_commit(trans, NULL, NULL, 0);
if (ret)
goto err;
}
@@ -802,7 +843,7 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
- struct bch_alloc_v4 a;
+ struct bkey_i_alloc_v4 *a;
struct printbuf buf = PRINTBUF;
int ret;
@@ -813,17 +854,20 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
if (ret)
goto out;
- bch2_alloc_to_v4(k, &a);
+ a = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ goto out;
- if (BCH_ALLOC_V4_NEED_INC_GEN(&a)) {
- a.gen++;
- SET_BCH_ALLOC_V4_NEED_INC_GEN(&a, false);
+ if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
+ a->v.gen++;
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
goto write;
}
- BUG_ON(a.journal_seq > c->journal.flushed_seq_ondisk);
+ BUG_ON(a->v.journal_seq > c->journal.flushed_seq_ondisk);
- if (bch2_fs_inconsistent_on(!BCH_ALLOC_V4_NEED_DISCARD(&a), c,
+ if (bch2_fs_inconsistent_on(!BCH_ALLOC_V4_NEED_DISCARD(&a->v), c,
"%s\n incorrectly set in need_discard btree",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = -EIO;
@@ -847,9 +891,9 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
goto out;
}
- SET_BCH_ALLOC_V4_NEED_DISCARD(&a, false);
+ SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
write:
- ret = bch2_alloc_write(trans, &iter, &a, 0);
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
out:
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
@@ -919,7 +963,7 @@ static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
struct bch_fs *c = trans->c;
struct btree_iter lru_iter, alloc_iter = { NULL };
struct bkey_s_c k;
- struct bch_alloc_v4 a;
+ struct bkey_i_alloc_v4 *a;
u64 bucket, idx;
int ret;
@@ -940,32 +984,27 @@ static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
idx = k.k->p.offset;
bucket = le64_to_cpu(bkey_s_c_to_lru(k).v->idx);
- bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
- POS(ca->dev_idx, bucket),
- BTREE_ITER_CACHED|
- BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_slot(&alloc_iter);
- ret = bkey_err(k);
+ a = bch2_trans_start_alloc_update(trans, &alloc_iter,
+ POS(ca->dev_idx, bucket));
+ ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
- bch2_alloc_to_v4(k, &a);
-
- if (bch2_fs_inconsistent_on(idx != alloc_lru_idx(a), c,
+ if (bch2_fs_inconsistent_on(idx != alloc_lru_idx(a->v), c,
"invalidating bucket with wrong lru idx (got %llu should be %llu",
- idx, alloc_lru_idx(a)))
+ idx, alloc_lru_idx(a->v)))
goto out;
- SET_BCH_ALLOC_V4_NEED_INC_GEN(&a, false);
- a.gen++;
- a.data_type = 0;
- a.dirty_sectors = 0;
- a.cached_sectors = 0;
- a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
- a.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+ a->v.gen++;
+ a->v.data_type = 0;
+ a->v.dirty_sectors = 0;
+ a->v.cached_sectors = 0;
+ a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+ a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
- ret = bch2_alloc_write(trans, &alloc_iter, &a,
- BTREE_TRIGGER_BUCKET_INVALIDATE);
+ ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
+ BTREE_TRIGGER_BUCKET_INVALIDATE);
out:
bch2_trans_iter_exit(trans, &alloc_iter);
bch2_trans_iter_exit(trans, &lru_iter);
@@ -1087,28 +1126,22 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_alloc_v4 a;
+ struct bkey_i_alloc_v4 *a;
u64 now;
int ret = 0;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
- BTREE_ITER_CACHED|
- BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
+ a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
+ ret = PTR_ERR_OR_ZERO(a);
if (ret)
- goto out;
-
- bch2_alloc_to_v4(k, &a);
+ return ret;
now = atomic64_read(&c->io_clock[rw].now);
- if (a.io_time[rw] == now)
+ if (a->v.io_time[rw] == now)
goto out;
- a.io_time[rw] = now;
+ a->v.io_time[rw] = now;
- ret = bch2_alloc_write(trans, &iter, &a, 0) ?:
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
bch2_trans_commit(trans, NULL, NULL, 0);
out:
bch2_trans_iter_exit(trans, &iter);
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index 11fe7273bd69..da1b650e8017 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -56,10 +56,11 @@ static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_
return pos;
}
-int bch2_alloc_write(struct btree_trans *, struct btree_iter *,
- struct bch_alloc_v4 *, unsigned);
+struct bkey_i_alloc_v4 *
+bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);
void bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
+struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);
int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 720001782216..e19991796c82 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -1330,6 +1330,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
struct bucket gc;
struct bkey_s_c k;
+ struct bkey_i_alloc_v4 *a;
struct bch_alloc_v4 old, new;
int ret;
@@ -1375,7 +1376,14 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
if (!bch2_alloc_v4_cmp(old, new))
return 0;
- ret = bch2_alloc_write(trans, iter, &new, BTREE_TRIGGER_NORUN);
+ a = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ return ret;
+
+ a->v = new;
+
+ ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
fsck_err:
return ret;
}
@@ -1900,6 +1908,7 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
struct bkey_s_c k;
struct bch_alloc_v4 a;
+ struct bkey_i_alloc_v4 *a_mut;
int ret;
k = bch2_btree_iter_peek_slot(iter);
@@ -1912,9 +1921,14 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
if (a.oldest_gen == ca->oldest_gen[iter->pos.offset])
return 0;
- a.oldest_gen = ca->oldest_gen[iter->pos.offset];
+ a_mut = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a_mut);
+ if (ret)
+ return ret;
+
+ a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
- return bch2_alloc_write(trans, iter, &a, 0);
+ return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
}
int bch2_gc_gens(struct bch_fs *c)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 011f18ecbe5e..7654ab24a909 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1386,50 +1386,25 @@ need_mark:
/* trans_mark: */
-static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
- const struct bch_extent_ptr *ptr,
- struct bch_alloc_v4 *a)
-{
- struct bch_fs *c = trans->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bkey_s_c k;
- int ret;
-
- bch2_trans_iter_init(trans, iter, BTREE_ID_alloc,
- POS(ptr->dev, PTR_BUCKET_NR(ca, ptr)),
- BTREE_ITER_WITH_UPDATES|
- BTREE_ITER_CACHED|
- BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
- if (ret) {
- bch2_trans_iter_exit(trans, iter);
- return ret;
- }
-
- bch2_alloc_to_v4(k, a);
- return 0;
-}
-
static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct bkey_s_c k, struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
{
struct btree_iter iter;
- struct bch_alloc_v4 a;
+ struct bkey_i_alloc_v4 *a;
int ret;
- ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &a);
- if (ret)
- return ret;
+ a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(trans->c, &p.ptr));
+ if (IS_ERR(a))
+ return PTR_ERR(a);
ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
- a.gen, &a.data_type,
- &a.dirty_sectors, &a.cached_sectors);
+ a->v.gen, &a->v.data_type,
+ &a->v.dirty_sectors, &a->v.cached_sectors);
if (ret)
goto out;
- ret = bch2_alloc_write(trans, &iter, &a, 0);
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
if (ret)
goto out;
out:
@@ -1562,7 +1537,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
struct bch_fs *c = trans->c;
const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
struct btree_iter iter;
- struct bch_alloc_v4 a;
+ struct bkey_i_alloc_v4 *a;
enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
? BCH_DATA_parity : 0;
s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
@@ -1571,59 +1546,59 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
if (deleting)
sectors = -sectors;
- ret = bch2_trans_start_alloc_update(trans, &iter, ptr, &a);
- if (ret)
- return ret;
+ a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
+ if (IS_ERR(a))
+ return PTR_ERR(a);
ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
- a.gen, a.data_type,
- a.dirty_sectors, a.cached_sectors);
+ a->v.gen, a->v.data_type,
+ a->v.dirty_sectors, a->v.cached_sectors);
if (ret)
goto err;
if (!deleting) {
- if (bch2_trans_inconsistent_on(a.stripe ||
- a.stripe_redundancy, trans,
+ if (bch2_trans_inconsistent_on(a->v.stripe ||
+ a->v.stripe_redundancy, trans,
"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
- iter.pos.inode, iter.pos.offset, a.gen,
- bch2_data_types[a.data_type],
- a.dirty_sectors,
- a.stripe, s.k->p.offset)) {
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ bch2_data_types[a->v.data_type],
+ a->v.dirty_sectors,
+ a->v.stripe, s.k->p.offset)) {
ret = -EIO;
goto err;
}
- if (bch2_trans_inconsistent_on(data_type && a.dirty_sectors, trans,
+ if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
"bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
- iter.pos.inode, iter.pos.offset, a.gen,
- bch2_data_types[a.data_type],
- a.dirty_sectors,
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ bch2_data_types[a->v.data_type],
+ a->v.dirty_sectors,
s.k->p.offset)) {
ret = -EIO;
goto err;
}
- a.stripe = s.k->p.offset;
- a.stripe_redundancy = s.v->nr_redundant;
+ a->v.stripe = s.k->p.offset;
+ a->v.stripe_redundancy = s.v->nr_redundant;
} else {
- if (bch2_trans_inconsistent_on(a.stripe != s.k->p.offset ||
- a.stripe_redundancy != s.v->nr_redundant, trans,
+ if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
+ a->v.stripe_redundancy != s.v->nr_redundant, trans,
"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
- iter.pos.inode, iter.pos.offset, a.gen,
- s.k->p.offset, a.stripe)) {
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ s.k->p.offset, a->v.stripe)) {
ret = -EIO;
goto err;
}
- a.stripe = 0;
- a.stripe_redundancy = 0;
+ a->v.stripe = 0;
+ a->v.stripe_redundancy = 0;
}
- a.dirty_sectors += sectors;
+ a->v.dirty_sectors += sectors;
if (data_type)
- a.data_type = !deleting ? data_type : 0;
+ a->v.data_type = !deleting ? data_type : 0;
- ret = bch2_alloc_write(trans, &iter, &a, 0);
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
if (ret)
goto err;
err:
@@ -1853,11 +1828,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
- struct bch_alloc_v4 a;
- struct bch_extent_ptr ptr = {
- .dev = ca->dev_idx,
- .offset = bucket_to_sector(ca, b),
- };
+ struct bkey_i_alloc_v4 *a;
int ret = 0;
/*
@@ -1866,26 +1837,26 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
if (b >= ca->mi.nbuckets)
return 0;
- ret = bch2_trans_start_alloc_update(trans, &iter, &ptr, &a);
- if (ret)
- return ret;
+ a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
+ if (IS_ERR(a))
+ return PTR_ERR(a);
- if (a.data_type && a.data_type != type) {
+ if (a->v.data_type && a->v.data_type != type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
- iter.pos.inode, iter.pos.offset, a.gen,
- bch2_data_types[a.data_type],
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ bch2_data_types[a->v.data_type],
bch2_data_types[type],
bch2_data_types[type]);
ret = -EIO;
goto out;
}
- a.data_type = type;
- a.dirty_sectors = sectors;
+ a->v.data_type = type;
+ a->v.dirty_sectors = sectors;
- ret = bch2_alloc_write(trans, &iter, &a, 0);
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
if (ret)
goto out;
out:
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 25baca33e885..853bc9dd1294 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -66,6 +66,14 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
return sector_to_bucket(ca, ptr->offset);
}
+static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
+ const struct bch_extent_ptr *ptr)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
+}
+
static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
const struct bch_extent_ptr *ptr)
{