author		Kent Overstreet <kent.overstreet@gmail.com>	2020-01-20 15:34:50 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2020-05-06 17:14:18 -0400
commit		6f4bfbc65f9100a98dba1734dbff26129a4413a8 (patch)
tree		2ff581f9abb56c912e7007577bb5813f3e5de1c4 /fs/bcachefs/buckets.c
parent		181bdfee8fea1f62a6544b6913d2291ecca5fcda (diff)
Merge with fe198a9f39 bcachefs: Improve tracepoints slightly in commit path
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Diffstat (limited to 'fs/bcachefs/buckets.c')
-rw-r--r--	fs/bcachefs/buckets.c	147
1 file changed, 65 insertions(+), 82 deletions(-)
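Taken as a whole, the patch does three things visible in the hunks below: it renames the BCH_BUCKET_MARK_* trigger flags to BTREE_TRIGGER_*; it threads an explicit flags argument through bch2_mark_update() and bch2_trans_mark_update(), adding a BTREE_TRIGGER_NORUN early return and replacing the BTREE_INSERT_NOMARK_OVERWRITES transaction flag with BTREE_TRIGGER_NOOVERWRITES; and it drops the trans_update_key() helper in favor of explicit bch2_trans_kmalloc() plus bch2_trans_update() calls. For orientation, a sketch of the renamed flag namespace; every name appears in the diff, but the bit assignments here are illustrative placeholders, not the definitions from the bcachefs headers:

	/*
	 * Sketch only: flag names as used in the hunks below; the bit
	 * values are placeholders, not the tree's actual definitions.
	 */
	#define BTREE_TRIGGER_NORUN		(1U << 0)
	#define BTREE_TRIGGER_INSERT		(1U << 1)
	#define BTREE_TRIGGER_OVERWRITE		(1U << 2)
	#define BTREE_TRIGGER_OVERWRITE_SPLIT	(1U << 3)
	#define BTREE_TRIGGER_GC		(1U << 4)
	#define BTREE_TRIGGER_BUCKET_INVALIDATE	(1U << 5)
	#define BTREE_TRIGGER_ALLOC_READ	(1U << 6)
	#define BTREE_TRIGGER_NOATOMIC		(1U << 7)
	#define BTREE_TRIGGER_NOOVERWRITES	(1U << 8)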
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 8d223aa2bee5..731b93255876 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -161,7 +161,7 @@ struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
struct bch_fs_usage *ret;
unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
- ret = kzalloc(bytes, GFP_NOWAIT);
+ ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
if (ret)
return ret;
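The hunk above adds __GFP_NOWARN: the caller already handles a failed GFP_NOWAIT allocation by falling back to another buffer, so the kernel's allocation-failure warning would be pure noise. The shape of the pattern, as a sketch (take_prealloc_buf() is a hypothetical stand-in for the fallback path this hunk elides):

	/* Try an atomic, no-warning allocation first; fall back to a
	 * preallocated buffer on failure. take_prealloc_buf() is
	 * hypothetical, standing in for the fallback not shown here. */
	ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
	if (ret)
		return ret;
	return take_prealloc_buf(c);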
@@ -628,7 +628,7 @@ unwind:
percpu_rwsem_assert_held(&c->mark_lock); \
\
for (gc = 0; gc < 2 && !ret; gc++) \
- if (!gc == !(flags & BCH_BUCKET_MARK_GC) || \
+ if (!gc == !(flags & BTREE_TRIGGER_GC) || \
(gc && gc_visited(c, pos))) \
ret = fn(c, __VA_ARGS__, gc); \
ret; \
@@ -710,7 +710,7 @@ static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool gc = flags & BTREE_TRIGGER_GC;
struct bkey_alloc_unpacked u;
struct bch_dev *ca;
struct bucket *g;
@@ -719,8 +719,8 @@ static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
/*
* alloc btree is read in by bch2_alloc_read, not gc:
*/
- if ((flags & BCH_BUCKET_MARK_GC) &&
- !(flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE))
+ if ((flags & BTREE_TRIGGER_GC) &&
+ !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
return 0;
ca = bch_dev_bkey_exists(c, k.k->p.inode);
@@ -743,7 +743,7 @@ static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
}
}));
- if (!(flags & BCH_BUCKET_MARK_ALLOC_READ))
+ if (!(flags & BTREE_TRIGGER_ALLOC_READ))
bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);
g->io_time[READ] = u.read_time;
@@ -756,7 +756,7 @@ static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
* not:
*/
- if ((flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE) &&
+ if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
old.cached_sectors) {
update_cached_sectors(c, fs_usage, ca->dev_idx,
-old.cached_sectors);
@@ -842,13 +842,13 @@ static s64 __ptr_disk_sectors_delta(unsigned old_size,
{
BUG_ON(!n || !d);
- if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
+ if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
BUG_ON(offset + -delta > old_size);
return -disk_sectors_scaled(n, d, old_size) +
disk_sectors_scaled(n, d, offset) +
disk_sectors_scaled(n, d, old_size - offset + delta);
- } else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
+ } else if (flags & BTREE_TRIGGER_OVERWRITE) {
BUG_ON(offset + -delta > old_size);
return -disk_sectors_scaled(n, d, old_size) +
@@ -874,8 +874,8 @@ static void bucket_set_stripe(struct bch_fs *c,
u64 journal_seq,
unsigned flags)
{
- bool enabled = !(flags & BCH_BUCKET_MARK_OVERWRITE);
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool enabled = !(flags & BTREE_TRIGGER_OVERWRITE);
+ bool gc = flags & BTREE_TRIGGER_GC;
unsigned i;
for (i = 0; i < v->nr_blocks; i++) {
@@ -922,7 +922,7 @@ static bool bch2_mark_pointer(struct bch_fs *c,
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool gc = flags & BTREE_TRIGGER_GC;
struct bucket_mark old, new;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
@@ -970,7 +970,7 @@ static bool bch2_mark_pointer(struct bch_fs *c,
new.data_type = data_type;
}
- if (flags & BCH_BUCKET_MARK_NOATOMIC) {
+ if (flags & BTREE_TRIGGER_NOATOMIC) {
g->_mark = new;
break;
}
@@ -1008,7 +1008,7 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c,
unsigned *nr_data,
unsigned *nr_parity)
{
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool gc = flags & BTREE_TRIGGER_GC;
struct stripe *m;
unsigned old, new;
int blocks_nonempty_delta;
@@ -1121,7 +1121,7 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
- bool gc = flags & BCH_BUCKET_MARK_GC;
+ bool gc = flags & BTREE_TRIGGER_GC;
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
size_t idx = s.k->p.offset;
struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
@@ -1129,14 +1129,14 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
spin_lock(&c->ec_stripes_heap_lock);
- if (!m || ((flags & BCH_BUCKET_MARK_OVERWRITE) && !m->alive)) {
+ if (!m || ((flags & BTREE_TRIGGER_OVERWRITE) && !m->alive)) {
spin_unlock(&c->ec_stripes_heap_lock);
bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
idx);
return -1;
}
- if (!(flags & BCH_BUCKET_MARK_OVERWRITE)) {
+ if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
m->sectors = le16_to_cpu(s.v->sectors);
m->algorithm = s.v->algorithm;
m->nr_blocks = s.v->nr_blocks;
@@ -1152,7 +1152,7 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
#endif
/* gc recalculates these fields: */
- if (!(flags & BCH_BUCKET_MARK_GC)) {
+ if (!(flags & BTREE_TRIGGER_GC)) {
for (i = 0; i < s.v->nr_blocks; i++) {
m->block_sectors[i] =
stripe_blockcount_get(s.v, i);
@@ -1185,16 +1185,16 @@ int bch2_mark_key_locked(struct bch_fs *c,
preempt_disable();
- if (!fs_usage || (flags & BCH_BUCKET_MARK_GC))
+ if (!fs_usage || (flags & BTREE_TRIGGER_GC))
fs_usage = fs_usage_ptr(c, journal_seq,
- flags & BCH_BUCKET_MARK_GC);
+ flags & BTREE_TRIGGER_GC);
switch (k.k->type) {
case KEY_TYPE_alloc:
ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags);
break;
case KEY_TYPE_btree_ptr:
- sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
+ sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
? c->opts.btree_node_size
: -c->opts.btree_node_size;
@@ -1210,7 +1210,7 @@ int bch2_mark_key_locked(struct bch_fs *c,
ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags);
break;
case KEY_TYPE_inode:
- if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
+ if (!(flags & BTREE_TRIGGER_OVERWRITE))
fs_usage->nr_inodes++;
else
fs_usage->nr_inodes--;
@@ -1260,7 +1260,7 @@ inline int bch2_mark_overwrite(struct btree_trans *trans,
unsigned offset = 0;
s64 sectors = 0;
- flags |= BCH_BUCKET_MARK_OVERWRITE;
+ flags |= BTREE_TRIGGER_OVERWRITE;
if (btree_node_is_extents(b)
? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
@@ -1288,7 +1288,7 @@ inline int bch2_mark_overwrite(struct btree_trans *trans,
offset = bkey_start_offset(&new->k) -
bkey_start_offset(old.k);
sectors = -((s64) new->k.size);
- flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
+ flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
break;
}
@@ -1300,26 +1300,29 @@ inline int bch2_mark_overwrite(struct btree_trans *trans,
}
int bch2_mark_update(struct btree_trans *trans,
- struct btree_insert_entry *insert,
+ struct btree_iter *iter,
+ struct bkey_i *insert,
struct bch_fs_usage *fs_usage,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter *iter = insert->iter;
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct bkey_packed *_k;
int ret = 0;
+ if (unlikely(flags & BTREE_TRIGGER_NORUN))
+ return 0;
+
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
- bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k),
- 0, insert->k->k.size,
+ bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
+ 0, insert->k.size,
fs_usage, trans->journal_res.seq,
- BCH_BUCKET_MARK_INSERT|flags);
+ BTREE_TRIGGER_INSERT|flags);
- if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
+ if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
return 0;
/*
@@ -1328,7 +1331,7 @@ int bch2_mark_update(struct btree_trans *trans,
*/
if ((iter->btree_id == BTREE_ID_ALLOC ||
iter->btree_id == BTREE_ID_EC) &&
- !bkey_deleted(&insert->k->k))
+ !bkey_deleted(&insert->k))
return 0;
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
@@ -1336,7 +1339,7 @@ int bch2_mark_update(struct btree_trans *trans,
struct bkey unpacked;
struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
- ret = bch2_mark_overwrite(trans, iter, k, insert->k,
+ ret = bch2_mark_overwrite(trans, iter, k, insert,
fs_usage, flags);
if (ret <= 0)
break;
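bch2_mark_update() now takes the iterator and the key separately instead of a struct btree_insert_entry, and reads its skip conditions (BTREE_TRIGGER_NORUN, BTREE_TRIGGER_NOOVERWRITES) from the passed-in trigger flags rather than from trans->flags; the next hunk deletes the trans_update_key() helper outright. A hedged sketch of a call under the new signature (the wrapper is illustrative, not a call site from the tree):

	/* Illustrative caller only: with the new signature, the iterator
	 * and key travel separately, and a caller can suppress triggers
	 * for one update by passing BTREE_TRIGGER_NORUN. */
	static int mark_insert(struct btree_trans *trans, struct btree_iter *iter,
			       struct bkey_i *k, struct bch_fs_usage *fs_usage,
			       bool run_triggers)
	{
		unsigned flags = run_triggers ? 0 : BTREE_TRIGGER_NORUN;

		return bch2_mark_update(trans, iter, k, fs_usage, flags);
	}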
@@ -1430,30 +1433,6 @@ static int trans_get_key(struct btree_trans *trans,
return ret;
}
-static void *trans_update_key(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned u64s)
-{
- struct btree_insert_entry *i;
- struct bkey_i *new_k;
-
- new_k = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
- if (IS_ERR(new_k))
- return new_k;
-
- bkey_init(&new_k->k);
- new_k->k.p = iter->pos;
-
- trans_for_each_update(trans, i)
- if (i->iter == iter) {
- i->k = new_k;
- return new_k;
- }
-
- bch2_trans_update(trans, iter, new_k);
- return new_k;
-}
-
static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
@@ -1537,7 +1516,7 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
u.data_type = u.dirty_sectors || u.cached_sectors
? data_type : 0;
- a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX);
+ a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
@@ -1545,6 +1524,7 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
bkey_alloc_init(&a->k_i);
a->k.p = iter->pos;
bch2_alloc_pack(a, u);
+ bch2_trans_update(trans, iter, &a->k_i, 0);
out:
bch2_trans_iter_put(trans, iter);
return ret;
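With trans_update_key() gone, the transactional mark path builds the replacement key itself: allocate transaction-lifetime memory with bch2_trans_kmalloc(), fill in the key, then queue it with an explicit bch2_trans_update(). The alloc-key hunk above is the pattern in miniature; restated as a sketch with error handling abbreviated:

	/* Sketch mirroring the hunk above; BKEY_ALLOC_U64s_MAX * sizeof(u64)
	 * is the same size as the hunk's "* 8". Allocate, pack the unpacked
	 * alloc struct u into the key, then hand it to the transaction. */
	a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * sizeof(u64));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	bkey_alloc_init(&a->k_i);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, u);
	bch2_trans_update(trans, iter, &a->k_i, 0);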
@@ -1559,9 +1539,8 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
- struct bkey_i *new_k;
struct bkey_s_c k;
- struct bkey_s_stripe s;
+ struct bkey_i_stripe *s;
int ret = 0;
ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
@@ -1576,21 +1555,21 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
goto out;
}
- new_k = trans_update_key(trans, iter, k.k->u64s);
- ret = PTR_ERR_OR_ZERO(new_k);
+ s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(s);
if (ret)
goto out;
- bkey_reassemble(new_k, k);
- s = bkey_i_to_s_stripe(new_k);
+ bkey_reassemble(&s->k_i, k);
- stripe_blockcount_set(s.v, p.block,
- stripe_blockcount_get(s.v, p.block) +
+ stripe_blockcount_set(&s->v, p.block,
+ stripe_blockcount_get(&s->v, p.block) +
sectors);
- *nr_data = s.v->nr_blocks - s.v->nr_redundant;
- *nr_parity = s.v->nr_redundant;
- bch2_bkey_to_replicas(&r->e, s.s_c);
+ *nr_data = s->v.nr_blocks - s->v.nr_redundant;
+ *nr_parity = s->v.nr_redundant;
+ bch2_bkey_to_replicas(&r->e, bkey_i_to_s_c(&s->k_i));
+ bch2_trans_update(trans, iter, &s->k_i, 0);
out:
bch2_trans_iter_put(trans, iter);
return ret;
@@ -1671,7 +1650,6 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
- struct bkey_i *new_k;
struct bkey_s_c k;
struct bkey_i_reflink_v *r_v;
s64 ret;
@@ -1689,7 +1667,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
goto err;
}
- if ((flags & BCH_BUCKET_MARK_OVERWRITE) &&
+ if ((flags & BTREE_TRIGGER_OVERWRITE) &&
(bkey_start_offset(k.k) < idx ||
k.k->p.offset > idx + sectors))
goto out;
@@ -1697,21 +1675,22 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
- new_k = trans_update_key(trans, iter, k.k->u64s);
- ret = PTR_ERR_OR_ZERO(new_k);
+ r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(r_v);
if (ret)
goto err;
- bkey_reassemble(new_k, k);
- r_v = bkey_i_to_reflink_v(new_k);
+ bkey_reassemble(&r_v->k_i, k);
le64_add_cpu(&r_v->v.refcount,
- !(flags & BCH_BUCKET_MARK_OVERWRITE) ? 1 : -1);
+ !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
if (!r_v->v.refcount) {
r_v->k.type = KEY_TYPE_deleted;
set_bkey_val_u64s(&r_v->k, 0);
}
+
+ bch2_trans_update(trans, iter, &r_v->k_i, 0);
out:
ret = k.k->p.offset - idx;
err:
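__bch2_trans_mark_reflink_p() returns the number of sectors accounted for by the indirect extent it found (k.k->p.offset - idx), which lets its caller walk a reflink pointer spanning several indirect extents by advancing idx until the range is consumed. A hedged sketch of that loop (the wrapper below is illustrative; the real caller's signature and clamping may differ):

	/* Illustrative caller loop implied by "ret = k.k->p.offset - idx":
	 * consume the reflinked range one indirect extent at a time. */
	static int trans_mark_reflink_p(struct btree_trans *trans,
					struct bkey_s_c_reflink_p p,
					u64 idx, u64 sectors, unsigned flags)
	{
		while (sectors) {
			s64 ret = __bch2_trans_mark_reflink_p(trans, p, idx,
							      sectors, flags);
			if (ret < 0)
				return ret;

			ret = min_t(s64, ret, sectors);
			idx	+= ret;
			sectors	-= ret;
		}

		return 0;
	}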
@@ -1750,7 +1729,7 @@ int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
- sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
+ sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
? c->opts.btree_node_size
: -c->opts.btree_node_size;
@@ -1763,7 +1742,7 @@ int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
case KEY_TYPE_inode:
d = replicas_deltas_realloc(trans, 0);
- if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
+ if (!(flags & BTREE_TRIGGER_OVERWRITE))
d->nr_inodes++;
else
d->nr_inodes--;
@@ -1791,22 +1770,26 @@ int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
int bch2_trans_mark_update(struct btree_trans *trans,
struct btree_iter *iter,
- struct bkey_i *insert)
+ struct bkey_i *insert,
+ unsigned flags)
{
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct bkey_packed *_k;
int ret;
+ if (unlikely(flags & BTREE_TRIGGER_NORUN))
+ return 0;
+
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
- 0, insert->k.size, BCH_BUCKET_MARK_INSERT);
+ 0, insert->k.size, BTREE_TRIGGER_INSERT);
if (ret)
return ret;
- if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
+ if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
return 0;
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
@@ -1815,7 +1798,7 @@ int bch2_trans_mark_update(struct btree_trans *trans,
struct bkey_s_c k;
unsigned offset = 0;
s64 sectors = 0;
- unsigned flags = BCH_BUCKET_MARK_OVERWRITE;
+ unsigned flags = BTREE_TRIGGER_OVERWRITE;
k = bkey_disassemble(b, _k, &unpacked);
@@ -1845,7 +1828,7 @@ int bch2_trans_mark_update(struct btree_trans *trans,
offset = bkey_start_offset(&insert->k) -
bkey_start_offset(k.k);
sectors = -((s64) insert->k.size);
- flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
+ flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
break;
}
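Net effect: both mark paths now share one trigger-flag contract. BTREE_TRIGGER_NORUN short-circuits all marking, BTREE_TRIGGER_INSERT/OVERWRITE/OVERWRITE_SPLIT describe what is being marked, and BTREE_TRIGGER_NOOVERWRITES replaces the old BTREE_INSERT_NOMARK_OVERWRITES transaction flag. Condensed into a sketch of the common early-out logic (mark_insert_key() is a hypothetical stand-in for each path's insert-side marking):

	/* Condensed sketch of the entry logic bch2_mark_update() and
	 * bch2_trans_mark_update() now share; mark_insert_key() is
	 * hypothetical shorthand, not a function from the tree. */
	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(iter->btree_id))
		return 0;

	ret = mark_insert_key(trans, insert, flags | BTREE_TRIGGER_INSERT);
	if (ret)
		return ret;

	if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
		return 0;

	/* ...then walk the overlapping old keys with BTREE_TRIGGER_OVERWRITE,
	 * adding OVERWRITE_SPLIT when the insert splits an existing extent. */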