author     Kent Overstreet <kent.overstreet@gmail.com>  2020-06-06 12:28:01 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>  2020-06-15 15:45:31 -0400
commit     8dd7a2097a1f800b35a7823da76885d6f742360c (patch)
tree       b19ecad55912664c80c05465b86dcb8fad3a51c3
parent     4c938ca3f851418cbe5078bfb11545d21b4992f1 (diff)
bcachefs: btree_bkey_cached_common
This is prep work for the btree key cache: btree iterators will point to
either struct btree, or a new struct bkey_cached.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
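The structural change lives in btree_types.h: lock, level and btree_id move
out of struct btree into an embedded struct btree_bkey_cached_common (named c
and kept as the first member), so that code needing only those fields can
later operate on struct bkey_cached through the same prefix. Every other hunk
is the mechanical fallout: b->lock, b->level and b->btree_id become b->c.lock,
b->c.level and b->c.btree_id. A minimal, self-contained sketch of the pattern
follows; the six_lock stub and the main() driver are illustrative stand-ins,
not bcachefs code:

#include <stddef.h>
#include <stdio.h>

struct six_lock { int state; };		/* stand-in for the kernel's six_lock */

#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

/* Fields shared by struct btree and, later, struct bkey_cached: */
struct btree_bkey_cached_common {
	struct six_lock	lock;
	unsigned char	level;
	unsigned char	btree_id;
};

struct btree {
	struct btree_bkey_cached_common c;	/* common prefix, kept first */
	unsigned long			flags;	/* btree-node-only state... */
};

/*
 * Lock callbacks receive only the six_lock, so the containing object is
 * recovered through the embedded member -- this is why lock_node_check_fn
 * changes from container_of(lock, struct btree, lock) to
 * container_of(lock, struct btree, c.lock) below:
 */
static struct btree *btree_from_lock(struct six_lock *lock)
{
	return container_of(lock, struct btree, c.lock);
}

int main(void)
{
	struct btree b = { .c = { .level = 1, .btree_id = 0 } };

	printf("level %u, node %p == %p\n", (unsigned) b.c.level,
	       (void *) btree_from_lock(&b.c.lock), (void *) &b);
	return 0;
}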
-rw-r--r--  fs/bcachefs/btree_cache.c            74
-rw-r--r--  fs/bcachefs/btree_cache.h             2
-rw-r--r--  fs/bcachefs/btree_gc.c               40
-rw-r--r--  fs/bcachefs/btree_gc.h                2
-rw-r--r--  fs/bcachefs/btree_io.c               42
-rw-r--r--  fs/bcachefs/btree_io.h                2
-rw-r--r--  fs/bcachefs/btree_iter.c             82
-rw-r--r--  fs/bcachefs/btree_iter.h              8
-rw-r--r--  fs/bcachefs/btree_locking.h          24
-rw-r--r--  fs/bcachefs/btree_types.h            15
-rw-r--r--  fs/bcachefs/btree_update_interior.c 111
-rw-r--r--  fs/bcachefs/btree_update_interior.h   6
-rw-r--r--  fs/bcachefs/btree_update_leaf.c       2
-rw-r--r--  fs/bcachefs/debug.c                   4
-rw-r--r--  fs/bcachefs/recovery.c               18
-rw-r--r--  include/trace/events/bcachefs.h       6
16 files changed, 221 insertions, 217 deletions
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index dc169a845da7..b6a716cd4b6d 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -28,7 +28,7 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
for (i = 0; i < BTREE_ID_NR; i++)
if (c->btree_roots[i].b)
reserve += min_t(unsigned, 1,
- c->btree_roots[i].b->level) * 8;
+ c->btree_roots[i].b->c.level) * 8;
c->btree_cache.reserve = reserve;
}
@@ -108,7 +108,7 @@ static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
return NULL;
bkey_btree_ptr_init(&b->key);
- six_lock_init(&b->lock);
+ six_lock_init(&b->c.lock);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
@@ -140,8 +140,8 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
{
int ret;
- b->level = level;
- b->btree_id = id;
+ b->c.level = level;
+ b->c.btree_id = id;
mutex_lock(&bc->lock);
ret = __bch2_btree_node_hash_insert(bc, b);
@@ -172,10 +172,10 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
lockdep_assert_held(&bc->lock);
- if (!six_trylock_intent(&b->lock))
+ if (!six_trylock_intent(&b->c.lock))
return -ENOMEM;
- if (!six_trylock_write(&b->lock))
+ if (!six_trylock_write(&b->c.lock))
goto out_unlock_intent;
if (btree_node_noevict(b))
@@ -216,9 +216,9 @@ out:
trace_btree_node_reap(c, b);
return ret;
out_unlock:
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
out_unlock_intent:
- six_unlock_intent(&b->lock);
+ six_unlock_intent(&b->c.lock);
ret = -ENOMEM;
goto out;
}
@@ -276,8 +276,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
if (++i > 3 &&
!btree_node_reclaim(c, b)) {
btree_node_data_free(c, b);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
freed++;
}
}
@@ -303,8 +303,8 @@ restart:
mutex_unlock(&bc->lock);
bch2_btree_node_hash_remove(bc, b);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
if (freed >= nr)
goto out;
@@ -555,12 +555,12 @@ got_node:
goto err;
bkey_btree_ptr_init(&b->key);
- six_lock_init(&b->lock);
+ six_lock_init(&b->c.lock);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
- BUG_ON(!six_trylock_intent(&b->lock));
- BUG_ON(!six_trylock_write(&b->lock));
+ BUG_ON(!six_trylock_intent(&b->c.lock));
+ BUG_ON(!six_trylock_write(&b->c.lock));
}
if (!b->data) {
@@ -593,8 +593,8 @@ err:
if (b) {
list_add(&b->list, &bc->freed);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
}
/* Try to cannibalize another cached btree node: */
@@ -649,8 +649,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
list_add(&b->list, &bc->freeable);
mutex_unlock(&bc->lock);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
return NULL;
}
@@ -664,22 +664,22 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
bch2_btree_node_read(c, b, sync);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
if (!sync) {
- six_unlock_intent(&b->lock);
+ six_unlock_intent(&b->c.lock);
return NULL;
}
if (lock_type == SIX_LOCK_read)
- six_lock_downgrade(&b->lock);
+ six_lock_downgrade(&b->c.lock);
return b;
}
static int lock_node_check_fn(struct six_lock *lock, void *p)
{
- struct btree *b = container_of(lock, struct btree, lock);
+ struct btree *b = container_of(lock, struct btree, c.lock);
const struct bkey_i *k = p;
return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
@@ -765,9 +765,9 @@ lock_node:
}
if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
- b->level != level ||
+ b->c.level != level ||
race_fault())) {
- six_unlock_type(&b->lock, lock_type);
+ six_unlock_type(&b->c.lock, lock_type);
if (bch2_btree_node_relock(iter, level + 1))
goto retry;
@@ -795,11 +795,11 @@ lock_node:
set_btree_node_accessed(b);
if (unlikely(btree_node_read_error(b))) {
- six_unlock_type(&b->lock, lock_type);
+ six_unlock_type(&b->c.lock, lock_type);
return ERR_PTR(-EIO);
}
- EBUG_ON(b->btree_id != iter->btree_id ||
+ EBUG_ON(b->c.btree_id != iter->btree_id ||
BTREE_NODE_LEVEL(b->data) != level ||
bkey_cmp(b->data->max_key, k->k.p));
@@ -835,14 +835,14 @@ retry:
return b;
} else {
lock_node:
- ret = six_lock_read(&b->lock, lock_node_check_fn, (void *) k);
+ ret = six_lock_read(&b->c.lock, lock_node_check_fn, (void *) k);
if (ret)
goto retry;
if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
- b->btree_id != btree_id ||
- b->level != level)) {
- six_unlock_read(&b->lock);
+ b->c.btree_id != btree_id ||
+ b->c.level != level)) {
+ six_unlock_read(&b->c.lock);
goto retry;
}
}
@@ -866,11 +866,11 @@ lock_node:
set_btree_node_accessed(b);
if (unlikely(btree_node_read_error(b))) {
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
return ERR_PTR(-EIO);
}
- EBUG_ON(b->btree_id != btree_id ||
+ EBUG_ON(b->c.btree_id != btree_id ||
BTREE_NODE_LEVEL(b->data) != level ||
bkey_cmp(b->data->max_key, k->k.p));
@@ -888,7 +888,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
struct bkey_packed *k;
BKEY_PADDED(k) tmp;
struct btree *ret = NULL;
- unsigned level = b->level;
+ unsigned level = b->c.level;
parent = btree_iter_node(iter, level + 1);
if (!parent)
@@ -911,7 +911,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
goto out;
}
- node_iter = iter->l[parent->level].iter;
+ node_iter = iter->l[parent->c.level].iter;
k = bch2_btree_node_iter_peek_all(&node_iter, parent);
BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
@@ -958,7 +958,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
if (!IS_ERR(ret)) {
- six_unlock_intent(&ret->lock);
+ six_unlock_intent(&ret->c.lock);
ret = ERR_PTR(-EINTR);
}
}
@@ -1019,7 +1019,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
pr_buf(out,
"l %u %llu:%llu - %llu:%llu:\n"
" ptrs: ",
- b->level,
+ b->c.level,
b->data->min_key.inode,
b->data->min_key.offset,
b->data->max_key.inode,
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 98cca30778ea..2160012c734f 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -101,7 +101,7 @@ static inline unsigned btree_blocks(struct bch_fs *c)
(BTREE_FOREGROUND_MERGE_THRESHOLD(c) + \
(BTREE_FOREGROUND_MERGE_THRESHOLD(c) << 2))
-#define btree_node_root(_c, _b) ((_c)->btree_roots[(_b)->btree_id].b)
+#define btree_node_root(_c, _b) ((_c)->btree_roots[(_b)->c.btree_id].b)
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
struct btree *);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 087a717aface..e8abc1937b55 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -186,7 +186,7 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
bch2_btree_node_iter_advance(&iter, b);
- if (b->level) {
+ if (b->c.level) {
ret = bch2_gc_check_topology(c, k,
&next_node_start,
b->data->max_key,
@@ -252,7 +252,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
if (!btree_node_fake(b))
ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
&max_stale, initial);
- gc_pos_set(c, gc_pos_btree_root(b->btree_id));
+ gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
mutex_unlock(&c->btree_root_lock);
return ret;
@@ -280,7 +280,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
if (ret)
break;
- if (b->level) {
+ if (b->c.level) {
struct btree *child;
BKEY_PADDED(k) tmp;
@@ -296,16 +296,16 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
if (ret)
break;
- if (b->level > target_depth) {
+ if (b->c.level > target_depth) {
child = bch2_btree_node_get_noiter(c, &tmp.k,
- b->btree_id, b->level - 1);
+ b->c.btree_id, b->c.level - 1);
ret = PTR_ERR_OR_ZERO(child);
if (ret)
break;
ret = bch2_gc_btree_init_recurse(c, child,
journal_keys, target_depth);
- six_unlock_read(&child->lock);
+ six_unlock_read(&child->c.lock);
if (ret)
break;
@@ -336,7 +336,7 @@ static int bch2_gc_btree_init(struct bch_fs *c,
if (btree_node_fake(b))
return 0;
- six_lock_read(&b->lock, NULL, NULL);
+ six_lock_read(&b->c.lock, NULL, NULL);
if (fsck_err_on(bkey_cmp(b->data->min_key, POS_MIN), c,
"btree root with incorrect min_key: %llu:%llu",
b->data->min_key.inode,
@@ -351,7 +351,7 @@ static int bch2_gc_btree_init(struct bch_fs *c,
BUG();
}
- if (b->level >= target_depth)
+ if (b->c.level >= target_depth)
ret = bch2_gc_btree_init_recurse(c, b,
journal_keys, target_depth);
@@ -359,7 +359,7 @@ static int bch2_gc_btree_init(struct bch_fs *c,
ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
&max_stale, true);
fsck_err:
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
return ret;
}
@@ -1078,9 +1078,9 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
set_btree_bset_end(n1, n1->set);
- six_unlock_write(&n2->lock);
+ six_unlock_write(&n2->c.lock);
bch2_btree_node_free_never_inserted(c, n2);
- six_unlock_intent(&n2->lock);
+ six_unlock_intent(&n2->c.lock);
memmove(new_nodes + i - 1,
new_nodes + i,
@@ -1116,7 +1116,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
bch2_btree_build_aux_trees(n);
bch2_btree_update_add_new_node(as, n);
- six_unlock_write(&n->lock);
+ six_unlock_write(&n->c.lock);
bch2_btree_node_write(c, n, SIX_LOCK_intent);
}
@@ -1159,7 +1159,7 @@ next:
BUG_ON(!bch2_keylist_empty(&keylist));
- BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
+ BUG_ON(iter->l[old_nodes[0]->c.level].b != old_nodes[0]);
bch2_btree_iter_node_replace(iter, new_nodes[0]);
@@ -1184,7 +1184,7 @@ next:
}
for (i = 0; i < nr_new_nodes; i++)
- six_unlock_intent(&new_nodes[i]->lock);
+ six_unlock_intent(&new_nodes[i]->c.lock);
bch2_btree_update_done(as);
bch2_keylist_free(&keylist, NULL);
@@ -1225,11 +1225,11 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
for (i = 1; i < GC_MERGE_NODES; i++) {
if (!merge[i] ||
- !six_relock_intent(&merge[i]->lock, lock_seq[i]))
+ !six_relock_intent(&merge[i]->c.lock, lock_seq[i]))
break;
- if (merge[i]->level != merge[0]->level) {
- six_unlock_intent(&merge[i]->lock);
+ if (merge[i]->c.level != merge[0]->c.level) {
+ six_unlock_intent(&merge[i]->c.lock);
break;
}
}
@@ -1238,11 +1238,11 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
bch2_coalesce_nodes(c, iter, merge);
for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
- lock_seq[i] = merge[i]->lock.state.seq;
- six_unlock_intent(&merge[i]->lock);
+ lock_seq[i] = merge[i]->c.lock.state.seq;
+ six_unlock_intent(&merge[i]->c.lock);
}
- lock_seq[0] = merge[0]->lock.state.seq;
+ lock_seq[0] = merge[0]->c.lock.state.seq;
if (kthread && kthread_should_stop()) {
bch2_trans_exit(&trans);
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
index e09af2fda3b6..3694a3df62a8 100644
--- a/fs/bcachefs/btree_gc.h
+++ b/fs/bcachefs/btree_gc.h
@@ -82,7 +82,7 @@ static inline struct gc_pos gc_pos_btree(enum btree_id id,
*/
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
{
- return gc_pos_btree(b->btree_id, b->key.k.p, b->level);
+ return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
}
/*
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 6a42ce2522fd..5fc9137b822e 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -584,8 +584,8 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
struct btree_node_entry *bne;
bool did_sort;
- EBUG_ON(!(b->lock.state.seq & 1));
- EBUG_ON(iter && iter->l[b->level].b != b);
+ EBUG_ON(!(b->c.lock.state.seq & 1));
+ EBUG_ON(iter && iter->l[b->c.level].b != b);
did_sort = btree_node_compact(c, b, iter);
@@ -634,8 +634,8 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
pr_buf(out, "error validating btree node %sat btree %u level %u/%u\n"
"pos ",
write ? "before write " : "",
- b->btree_id, b->level,
- c->btree_roots[b->btree_id].level);
+ b->c.btree_id, b->c.level,
+ c->btree_roots[b->c.btree_id].level);
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
pr_buf(out, " node offset %u", b->written);
@@ -747,11 +747,11 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
"incorrect sequence number (wrong btree node)");
}
- btree_err_on(BTREE_NODE_ID(bn) != b->btree_id,
+ btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect btree id");
- btree_err_on(BTREE_NODE_LEVEL(bn) != b->level,
+ btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect level");
@@ -762,7 +762,7 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
}
if (!write)
- compat_btree_node(b->level, b->btree_id, version,
+ compat_btree_node(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write, bn);
if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
@@ -783,7 +783,7 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
"incorrect max key");
if (write)
- compat_btree_node(b->level, b->btree_id, version,
+ compat_btree_node(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write, bn);
/* XXX: ideally we would be validating min_key too */
@@ -805,7 +805,7 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
BTREE_ERR_FATAL, c, b, i,
"invalid bkey format: %s", err);
- compat_bformat(b->level, b->btree_id, version,
+ compat_bformat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&bn->format);
}
@@ -851,7 +851,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
/* XXX: validate k->u64s */
if (!write)
- bch2_bkey_compat(b->level, b->btree_id, version,
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&b->format, k);
@@ -874,7 +874,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
}
if (write)
- bch2_bkey_compat(b->level, b->btree_id, version,
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&b->format, k);
@@ -1280,8 +1280,8 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
bch2_btree_set_root_for_read(c, b);
err:
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
return ret;
}
@@ -1325,15 +1325,15 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p,
- BTREE_MAX_DEPTH, b->level, 0);
+ iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
+ BTREE_MAX_DEPTH, b->c.level, 0);
retry:
ret = bch2_btree_iter_traverse(iter);
if (ret)
goto err;
/* has node been freed? */
- if (iter->l[b->level].b != b) {
+ if (iter->l[b->c.level].b != b) {
/* node has been freed: */
BUG_ON(!btree_node_dying(b));
goto out;
@@ -1764,18 +1764,18 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
BUG_ON(lock_type_held == SIX_LOCK_write);
if (lock_type_held == SIX_LOCK_intent ||
- six_lock_tryupgrade(&b->lock)) {
+ six_lock_tryupgrade(&b->c.lock)) {
__bch2_btree_node_write(c, b, SIX_LOCK_intent);
/* don't cycle lock unnecessarily: */
if (btree_node_just_written(b) &&
- six_trylock_write(&b->lock)) {
+ six_trylock_write(&b->c.lock)) {
bch2_btree_post_write_cleanup(c, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
}
if (lock_type_held == SIX_LOCK_read)
- six_lock_downgrade(&b->lock);
+ six_lock_downgrade(&b->c.lock);
} else {
__bch2_btree_node_write(c, b, SIX_LOCK_read);
}
@@ -1845,7 +1845,7 @@ ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
b,
(flags & (1 << BTREE_NODE_dirty)) != 0,
(flags & (1 << BTREE_NODE_need_write)) != 0,
- b->level,
+ b->c.level,
b->written,
!list_empty_careful(&b->write_blocked),
b->will_make_reachable != 0,
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 337d2bdd29e8..f3d7ec749b61 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -114,7 +114,7 @@ static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
break;
}
- six_unlock_type(&b->lock, lock_held);
+ six_unlock_type(&b->c.lock, lock_held);
btree_node_wait_on_io(b);
btree_node_lock_type(c, b, lock_held);
}
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index acbd7a31ba0e..93d710faddae 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -51,7 +51,7 @@ static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
struct btree *b)
{
- return iter->btree_id == b->btree_id &&
+ return iter->btree_id == b->c.btree_id &&
!btree_iter_pos_before_node(iter, b) &&
!btree_iter_pos_after_node(iter, b);
}
@@ -68,11 +68,11 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
struct btree_iter *linked;
unsigned readers = 0;
- EBUG_ON(!btree_node_intent_locked(iter, b->level));
+ EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
trans_for_each_iter(iter->trans, linked)
- if (linked->l[b->level].b == b &&
- btree_node_read_locked(linked, b->level))
+ if (linked->l[b->c.level].b == b &&
+ btree_node_read_locked(linked, b->c.level))
readers++;
/*
@@ -82,10 +82,10 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
* locked:
*/
atomic64_sub(__SIX_VAL(read_lock, readers),
- &b->lock.state.counter);
+ &b->c.lock.state.counter);
btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
atomic64_add(__SIX_VAL(read_lock, readers),
- &b->lock.state.counter);
+ &b->c.lock.state.counter);
}
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
@@ -99,7 +99,7 @@ bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
if (race_fault())
return false;
- if (six_relock_type(&b->lock, want, iter->l[level].lock_seq) ||
+ if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
(btree_node_lock_seq_matches(iter, b, level) &&
btree_node_lock_increment(iter->trans, b, level, want))) {
mark_btree_node_locked(iter, level, want);
@@ -125,8 +125,8 @@ static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
return false;
if (btree_node_locked(iter, level)
- ? six_lock_tryupgrade(&b->lock)
- : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq))
+ ? six_lock_tryupgrade(&b->c.lock)
+ : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
goto success;
if (btree_node_lock_seq_matches(iter, b, level) &&
@@ -162,7 +162,7 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
? 0
: (unsigned long) iter->l[l].b,
is_btree_node(iter, l)
- ? iter->l[l].b->lock.state.seq
+ ? iter->l[l].b->c.lock.state.seq
: 0);
fail_idx = l;
@@ -268,7 +268,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
*/
if (linked->l[level].b == b &&
btree_node_locked_type(linked, level) >= type) {
- six_lock_increment(&b->lock, type);
+ six_lock_increment(&b->c.lock, type);
return true;
}
}
@@ -278,10 +278,10 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
return false;
}
- if (six_trylock_type(&b->lock, type))
+ if (six_trylock_type(&b->c.lock, type))
return true;
- if (six_lock_type(&b->lock, type, should_sleep_fn, p))
+ if (six_lock_type(&b->c.lock, type, should_sleep_fn, p))
return false;
bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
@@ -395,7 +395,7 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
btree_node_unlock(iter, l);
} else {
if (btree_node_intent_locked(iter, l)) {
- six_lock_downgrade(&iter->l[l].b->lock);
+ six_lock_downgrade(&iter->l[l].b->c.lock);
iter->nodes_intent_locked ^= 1 << l;
}
break;
@@ -545,7 +545,7 @@ void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
return;
trans_for_each_iter_with_node(trans, b, iter)
- bch2_btree_iter_verify_level(iter, b->level);
+ bch2_btree_iter_verify_level(iter, b->c.level);
}
#else
@@ -576,7 +576,7 @@ static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
struct btree *b,
struct bkey_packed *where)
{
- struct btree_iter_level *l = &iter->l[b->level];
+ struct btree_iter_level *l = &iter->l[b->c.level];
struct bpos pos = btree_iter_search_key(iter);
if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
@@ -596,7 +596,7 @@ void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
trans_for_each_iter_with_node(iter->trans, b, linked) {
__bch2_btree_iter_fix_key_modified(linked, b, where);
- bch2_btree_iter_verify_level(linked, b->level);
+ bch2_btree_iter_verify_level(linked, b->c.level);
}
}
@@ -666,7 +666,7 @@ fixup_done:
*/
if (!bch2_btree_node_iter_end(node_iter) &&
iter_current_key_modified &&
- (b->level ||
+ (b->c.level ||
btree_node_type_is_extents(iter->btree_id))) {
struct bset_tree *t;
struct bkey_packed *k, *k2, *p;
@@ -693,7 +693,7 @@ fixup_done:
}
}
- if (!b->level &&
+ if (!b->c.level &&
node_iter == &iter->l[0].iter &&
iter_current_key_modified)
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
@@ -709,7 +709,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct btree_iter *linked;
- if (node_iter != &iter->l[b->level].iter) {
+ if (node_iter != &iter->l[b->c.level].iter) {
__bch2_btree_node_iter_fix(iter, b, node_iter, t,
where, clobber_u64s, new_u64s);
@@ -719,9 +719,9 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
trans_for_each_iter_with_node(iter->trans, b, linked) {
__bch2_btree_node_iter_fix(linked, b,
- &linked->l[b->level].iter, t,
+ &linked->l[b->c.level].iter, t,
where, clobber_u64s, new_u64s);
- bch2_btree_iter_verify_level(linked, b->level);
+ bch2_btree_iter_verify_level(linked, b->c.level);
}
}
@@ -805,7 +805,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
return;
- plevel = b->level + 1;
+ plevel = b->c.level + 1;
if (!btree_iter_node(iter, plevel))
return;
@@ -828,7 +828,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
}
if (!parent_locked)
- btree_node_unlock(iter, b->level + 1);
+ btree_node_unlock(iter, b->c.level + 1);
}
static inline void __btree_iter_init(struct btree_iter *iter,
@@ -848,11 +848,11 @@ static inline void btree_iter_node_set(struct btree_iter *iter,
btree_iter_verify_new_node(iter, b);
EBUG_ON(!btree_iter_pos_in_node(iter, b));
- EBUG_ON(b->lock.state.seq & 1);
+ EBUG_ON(b->c.lock.state.seq & 1);
- iter->l[b->level].lock_seq = b->lock.state.seq;
- iter->l[b->level].b = b;
- __btree_iter_init(iter, b->level);
+ iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ iter->l[b->c.level].b = b;
+ __btree_iter_init(iter, b->c.level);
}
/*
@@ -871,12 +871,12 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
* the old node we're replacing has already been
* unlocked and the pointer invalidated
*/
- BUG_ON(btree_node_locked(linked, b->level));
+ BUG_ON(btree_node_locked(linked, b->c.level));
- t = btree_lock_want(linked, b->level);
+ t = btree_lock_want(linked, b->c.level);
if (t != BTREE_NODE_UNLOCKED) {
- six_lock_increment(&b->lock, t);
- mark_btree_node_locked(linked, b->level, t);
+ six_lock_increment(&b->c.lock, t);
+ mark_btree_node_locked(linked, b->c.level, t);
}
btree_iter_node_set(linked, b);
@@ -886,7 +886,7 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- unsigned level = b->level;
+ unsigned level = b->c.level;
trans_for_each_iter(iter->trans, linked)
if (linked->l[level].b == b) {
@@ -904,12 +904,12 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
struct btree_iter *linked;
trans_for_each_iter_with_node(iter->trans, b, linked)
- __btree_iter_init(linked, b->level);
+ __btree_iter_init(linked, b->c.level);
}
static int lock_root_check_fn(struct six_lock *lock, void *p)
{
- struct btree *b = container_of(lock, struct btree, lock);
+ struct btree *b = container_of(lock, struct btree, c.lock);
struct btree **rootp = p;
return b == *rootp ? 0 : -1;
@@ -927,7 +927,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
while (1) {
b = READ_ONCE(*rootp);
- iter->level = READ_ONCE(b->level);
+ iter->level = READ_ONCE(b->c.level);
if (unlikely(iter->level < depth_want)) {
/*
@@ -949,7 +949,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
return -EINTR;
if (likely(b == READ_ONCE(*rootp) &&
- b->level == iter->level &&
+ b->c.level == iter->level &&
!race_fault())) {
for (i = 0; i < iter->level; i++)
iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
@@ -962,7 +962,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
return 0;
}
- six_unlock_type(&b->lock, lock_type);
+ six_unlock_type(&b->c.lock, lock_type);
}
}
@@ -2002,7 +2002,7 @@ static inline void btree_iter_copy(struct btree_iter *dst,
for (i = 0; i < BTREE_MAX_DEPTH; i++)
if (btree_node_locked(dst, i))
- six_lock_increment(&dst->l[i].b->lock,
+ six_lock_increment(&dst->l[i].b->c.lock,
__btree_lock_want(dst, i));
dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
@@ -2317,8 +2317,8 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
bch2_bpos_to_text(out, trans->locking_pos);
pr_buf(out, " node %px l=%u %s:",
- b, b->level,
- bch2_btree_ids[b->btree_id]);
+ b, b->c.level,
+ bch2_btree_ids[b->c.btree_id]);
bch2_bpos_to_text(out, b->key.k.p);
pr_buf(out, "\n");
}
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index b11d2a30d9c7..bc408f1272e7 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -27,13 +27,13 @@ static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
* that write lock. The lock sequence number is incremented by taking
* and releasing write locks and is even when unlocked:
*/
- return iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1;
+ return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
static inline struct btree *btree_node_parent(struct btree_iter *iter,
struct btree *b)
{
- return btree_iter_node(iter, b->level + 1);
+ return btree_iter_node(iter, b->c.level + 1);
}
static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
@@ -73,8 +73,8 @@ __trans_next_iter(struct btree_trans *trans, unsigned idx)
static inline bool __iter_has_node(const struct btree_iter *iter,
const struct btree *b)
{
- return iter->l[b->level].b == b &&
- btree_node_lock_seq_matches(iter, b, b->level);
+ return iter->l[b->c.level].b == b &&
+ btree_node_lock_seq_matches(iter, b, b->c.level);
}
static inline struct btree_iter *
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index da2a0ebbc24f..81fbf3e18647 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -102,7 +102,7 @@ static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
EBUG_ON(level >= BTREE_MAX_DEPTH);
if (lock_type != BTREE_NODE_UNLOCKED)
- six_unlock_type(&iter->l[level].b->lock, lock_type);
+ six_unlock_type(&iter->l[level].b->c.lock, lock_type);
mark_btree_node_unlocked(iter, level);
}
@@ -143,14 +143,14 @@ static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
{
u64 start_time = local_clock();
- six_lock_type(&b->lock, type, NULL, NULL);
+ six_lock_type(&b->c.lock, type, NULL, NULL);
bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}
static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
enum six_lock_type type)
{
- if (!six_trylock_type(&b->lock, type))
+ if (!six_trylock_type(&b->c.lock, type))
__btree_node_lock_type(c, b, type);
}
@@ -167,7 +167,7 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
trans_for_each_iter(trans, iter)
if (iter->l[level].b == b &&
btree_node_locked_type(iter, level) >= want) {
- six_lock_increment(&b->lock, want);
+ six_lock_increment(&b->c.lock, want);
return true;
}
@@ -197,7 +197,7 @@ static inline bool btree_node_lock(struct btree *b,
trans->locking_btree_id = iter->btree_id;
trans->locking_level = level;
#endif
- ret = likely(six_trylock_type(&b->lock, type)) ||
+ ret = likely(six_trylock_type(&b->c.lock, type)) ||
btree_node_lock_increment(trans, b, level, type) ||
__bch2_btree_node_lock(b, pos, level, iter, type,
should_sleep_fn, p);
@@ -230,13 +230,13 @@ bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
{
struct btree_iter *linked;
- EBUG_ON(iter->l[b->level].b != b);
- EBUG_ON(iter->l[b->level].lock_seq + 1 != b->lock.state.seq);
+ EBUG_ON(iter->l[b->c.level].b != b);
+ EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
trans_for_each_iter_with_node(iter->trans, b, linked)
- linked->l[b->level].lock_seq += 2;
+ linked->l[b->c.level].lock_seq += 2;
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
}
void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
@@ -245,10 +245,10 @@ void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
- EBUG_ON(iter->l[b->level].b != b);
- EBUG_ON(iter->l[b->level].lock_seq != b->lock.state.seq);
+ EBUG_ON(iter->l[b->c.level].b != b);
+ EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
- if (unlikely(!six_trylock_write(&b->lock)))
+ if (unlikely(!six_trylock_write(&b->c.lock)))
__bch2_btree_node_lock_write(b, iter);
}
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 047b7b0776a1..9ca4032f49a6 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -60,17 +60,20 @@ struct btree_alloc {
BKEY_PADDED(k);
};
+struct btree_bkey_cached_common {
+ struct six_lock lock;
+ u8 level;
+ u8 btree_id;
+};
+
struct btree {
- /* Hottest entries first */
+ struct btree_bkey_cached_common c;
+
struct rhash_head hash;
u64 hash_val;
- struct six_lock lock;
-
unsigned long flags;
u16 written;
- u8 level;
- u8 btree_id;
u8 nsets;
u8 nr_key_bits;
@@ -495,7 +498,7 @@ static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_
/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
- return __btree_node_type(b->level, b->btree_id);
+ return __btree_node_type(b->c.level, b->c.btree_id);
}
static inline bool btree_node_type_is_extents(enum btree_node_type type)
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 4d38943a4f0c..a626a7698d13 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -35,7 +35,7 @@ static void btree_node_interior_verify(struct btree *b)
struct bkey_s_c_btree_ptr_v2 bp;
struct bkey unpacked;
- BUG_ON(!b->level);
+ BUG_ON(!b->c.level);
bch2_btree_node_iter_init_from_start(&iter, b);
@@ -135,7 +135,7 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b)
bch2_btree_node_hash_remove(&c->btree_cache, b);
- six_lock_wakeup_all(&b->lock);
+ six_lock_wakeup_all(&b->c.lock);
mutex_lock(&c->btree_cache.lock);
list_move(&b->list, &c->btree_cache.freeable);
@@ -152,7 +152,7 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
btree_node_lock_type(c, b, SIX_LOCK_write);
__btree_node_free(c, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
bch2_open_buckets_put(c, &ob);
}
@@ -163,12 +163,12 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
struct btree_iter *linked;
trans_for_each_iter(iter->trans, linked)
- BUG_ON(linked->l[b->level].b == b);
+ BUG_ON(linked->l[b->c.level].b == b);
- six_lock_write(&b->lock, NULL, NULL);
+ six_lock_write(&b->c.lock, NULL, NULL);
__btree_node_free(c, b);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
}
static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
@@ -267,8 +267,8 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
set_btree_node_need_write(b);
bch2_bset_init_first(b, &b->data->keys);
- b->level = level;
- b->btree_id = as->btree_id;
+ b->c.level = level;
+ b->c.btree_id = as->btree_id;
memset(&b->nr, 0, sizeof(b->nr));
b->data->magic = cpu_to_le64(bset_magic(c));
@@ -321,7 +321,7 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
{
struct btree *n;
- n = bch2_btree_node_alloc(as, b->level);
+ n = bch2_btree_node_alloc(as, b->c.level);
SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
@@ -366,7 +366,7 @@ static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
bch2_btree_build_aux_trees(b);
bch2_btree_update_add_new_node(as, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
return b;
}
@@ -380,7 +380,7 @@ static void bch2_btree_reserve_put(struct btree_update *as)
while (as->nr_prealloc_nodes) {
struct btree *b = as->prealloc_nodes[--as->nr_prealloc_nodes];
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
if (c->btree_reserve_cache_nr <
ARRAY_SIZE(c->btree_reserve_cache)) {
@@ -396,9 +396,9 @@ static void bch2_btree_reserve_put(struct btree_update *as)
btree_node_lock_type(c, b, SIX_LOCK_write);
__btree_node_free(c, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->lock);
+ six_unlock_intent(&b->c.lock);
}
mutex_unlock(&c->btree_reserve_cache_lock);
@@ -560,7 +560,7 @@ static void btree_update_nodes_written(struct btree_update *as)
if (!ret && as->b == b) {
struct bset *i = btree_bset_last(b);
- BUG_ON(!b->level);
+ BUG_ON(!b->c.level);
BUG_ON(!btree_node_dirty(b));
i->journal_seq = cpu_to_le64(
@@ -571,10 +571,10 @@ static void btree_update_nodes_written(struct btree_update *as)
}
mutex_unlock(&c->btree_interior_update_lock);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
btree_node_write_if_need(c, b, SIX_LOCK_intent);
- six_unlock_intent(&b->lock);
+ six_unlock_intent(&b->c.lock);
}
bch2_journal_pin_drop(&c->journal, &as->journal);
@@ -595,7 +595,7 @@ static void btree_update_nodes_written(struct btree_update *as)
btree_node_lock_type(c, b, SIX_LOCK_read);
btree_node_write_if_need(c, b, SIX_LOCK_read);
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
}
for (i = 0; i < as->nr_open_buckets; i++)
@@ -694,7 +694,7 @@ static void btree_update_updated_root(struct btree_update *as, struct btree *b)
as->journal_u64s +=
journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
BCH_JSET_ENTRY_btree_root,
- b->btree_id, b->level,
+ b->c.btree_id, b->c.level,
insert, insert->k.u64s);
mutex_lock(&c->btree_interior_update_lock);
@@ -946,7 +946,7 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
mutex_lock(&c->btree_root_lock);
BUG_ON(btree_node_root(c, b) &&
- (b->level < btree_node_root(c, b)->level ||
+ (b->c.level < btree_node_root(c, b)->c.level ||
!btree_node_dying(btree_node_root(c, b))));
btree_node_root(c, b) = b;
@@ -1014,7 +1014,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
as->journal_u64s +=
journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
BCH_JSET_ENTRY_btree_keys,
- b->btree_id, b->level,
+ b->c.btree_id, b->c.level,
insert, insert->k.u64s);
while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
@@ -1039,7 +1039,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
struct bset *set1, *set2;
struct bkey_packed *k, *prev = NULL;
- n2 = bch2_btree_node_alloc(as, n1->level);
+ n2 = bch2_btree_node_alloc(as, n1->c.level);
bch2_btree_update_add_new_node(as, n2);
n2->data->max_key = n1->data->max_key;
@@ -1108,7 +1108,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
bch2_verify_btree_nr_keys(n1);
bch2_verify_btree_nr_keys(n2);
- if (n1->level) {
+ if (n1->c.level) {
btree_node_interior_verify(n1);
btree_node_interior_verify(n2);
}
@@ -1182,7 +1182,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
u64 start_time = local_clock();
BUG_ON(!parent && (b != btree_node_root(c, b)));
- BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
+ BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
bch2_btree_interior_update_will_free_node(as, b);
@@ -1199,8 +1199,8 @@ static void btree_split(struct btree_update *as, struct btree *b,
bch2_btree_build_aux_trees(n2);
bch2_btree_build_aux_trees(n1);
- six_unlock_write(&n2->lock);
- six_unlock_write(&n1->lock);
+ six_unlock_write(&n2->c.lock);
+ six_unlock_write(&n1->c.lock);
bch2_btree_node_write(c, n2, SIX_LOCK_intent);
@@ -1214,7 +1214,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
if (!parent) {
/* Depth increases, make a new root */
- n3 = __btree_root_alloc(as, b->level + 1);
+ n3 = __btree_root_alloc(as, b->c.level + 1);
n3->sib_u64s[0] = U16_MAX;
n3->sib_u64s[1] = U16_MAX;
@@ -1227,7 +1227,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
trace_btree_compact(c, b);
bch2_btree_build_aux_trees(n1);
- six_unlock_write(&n1->lock);
+ six_unlock_write(&n1->c.lock);
if (parent)
bch2_keylist_add(&as->parent_keys, &n1->key);
@@ -1255,7 +1255,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
/* Successful split, update the iterator to point to the new nodes: */
- six_lock_increment(&b->lock, SIX_LOCK_intent);
+ six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b);
if (n3)
bch2_btree_iter_node_replace(iter, n3);
@@ -1272,10 +1272,10 @@ static void btree_split(struct btree_update *as, struct btree *b,
bch2_btree_node_free_inmem(c, b, iter);
if (n3)
- six_unlock_intent(&n3->lock);
+ six_unlock_intent(&n3->c.lock);
if (n2)
- six_unlock_intent(&n2->lock);
- six_unlock_intent(&n1->lock);
+ six_unlock_intent(&n2->c.lock);
+ six_unlock_intent(&n1->c.lock);
bch2_btree_trans_verify_locks(iter->trans);
@@ -1293,7 +1293,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
struct bkey_packed *k;
/* Don't screw up @iter's position: */
- node_iter = iter->l[b->level].iter;
+ node_iter = iter->l[b->c.level].iter;
/*
* btree_split(), btree_gc_coalesce() will insert keys before
@@ -1310,7 +1310,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
btree_update_updated_node(as, b);
trans_for_each_iter_with_node(iter->trans, b, linked)
- bch2_btree_node_iter_peek(&linked->l[b->level].iter, b);
+ bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
bch2_btree_trans_verify_iters(iter->trans, b);
}
@@ -1336,8 +1336,8 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
- BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
- BUG_ON(!b->level);
+ BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
+ BUG_ON(!b->c.level);
BUG_ON(!as || as->b);
bch2_verify_keylist_sorted(keys);
@@ -1374,7 +1374,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
* the btree iterator yet, so the merge path's unlock/wait/relock dance
* won't work:
*/
- bch2_foreground_maybe_merge(c, iter, b->level,
+ bch2_foreground_maybe_merge(c, iter, b->c.level,
flags|BTREE_INSERT_NOUNLOCK);
return;
split:
@@ -1526,7 +1526,7 @@ retry:
b->sib_u64s[sib] = sib_u64s;
if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
- six_unlock_intent(&m->lock);
+ six_unlock_intent(&m->c.lock);
goto out;
}
@@ -1556,7 +1556,7 @@ retry:
bch2_btree_interior_update_will_free_node(as, b);
bch2_btree_interior_update_will_free_node(as, m);
- n = bch2_btree_node_alloc(as, b->level);
+ n = bch2_btree_node_alloc(as, b->c.level);
bch2_btree_update_add_new_node(as, n);
btree_set_min(n, prev->data->min_key);
@@ -1569,7 +1569,7 @@ retry:
bch2_btree_sort_into(c, n, next);
bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->lock);
+ six_unlock_write(&n->c.lock);
bkey_init(&delete.k);
delete.k.p = prev->key.k.p;
@@ -1582,7 +1582,7 @@ retry:
bch2_btree_update_get_open_buckets(as, n);
- six_lock_increment(&b->lock, SIX_LOCK_intent);
+ six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_drop(iter, m);
@@ -1593,7 +1593,7 @@ retry:
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_node_free_inmem(c, m, iter);
- six_unlock_intent(&n->lock);
+ six_unlock_intent(&n->c.lock);
bch2_btree_update_done(as);
@@ -1615,7 +1615,7 @@ out:
return;
err_cycle_gc_lock:
- six_unlock_intent(&m->lock);
+ six_unlock_intent(&m->c.lock);
if (flags & BTREE_INSERT_NOUNLOCK)
goto out;
@@ -1628,7 +1628,7 @@ err_cycle_gc_lock:
goto err;
err_unlock:
- six_unlock_intent(&m->lock);
+ six_unlock_intent(&m->c.lock);
if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
up_read(&c->gc_lock);
err:
@@ -1671,7 +1671,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
bch2_btree_update_add_new_node(as, n);
bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->lock);
+ six_unlock_write(&n->c.lock);
trace_btree_gc_rewrite_node(c, b);
@@ -1686,11 +1686,11 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
bch2_btree_update_get_open_buckets(as, n);
- six_lock_increment(&b->lock, SIX_LOCK_intent);
+ six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_replace(iter, n);
bch2_btree_node_free_inmem(c, b, iter);
- six_unlock_intent(&n->lock);
+ six_unlock_intent(&n->c.lock);
bch2_btree_update_done(as);
return 0;
@@ -1767,7 +1767,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
if (new_hash) {
bkey_copy(&new_hash->key, new_key);
ret = bch2_btree_node_hash_insert(&c->btree_cache,
- new_hash, b->level, b->btree_id);
+ new_hash, b->c.level, b->c.btree_id);
BUG_ON(ret);
}
@@ -1893,8 +1893,8 @@ err:
list_move(&new_hash->list, &c->btree_cache.freeable);
mutex_unlock(&c->btree_cache.lock);
- six_unlock_write(&new_hash->lock);
- six_unlock_intent(&new_hash->lock);
+ six_unlock_write(&new_hash->c.lock);
+ six_unlock_intent(&new_hash->c.lock);
}
up_read(&c->gc_lock);
closure_sync(&cl);
@@ -1934,8 +1934,8 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
bch2_btree_cache_cannibalize_unlock(c);
set_btree_node_fake(b);
- b->level = 0;
- b->btree_id = id;
+ b->c.level = 0;
+ b->c.btree_id = id;
bkey_btree_ptr_init(&b->key);
b->key.k.p = POS_MAX;
@@ -1950,13 +1950,14 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
b->data->format = bch2_btree_calc_format(b);
btree_node_set_format(b, b->data->format);
- ret = bch2_btree_node_hash_insert(&c->btree_cache, b, b->level, b->btree_id);
+ ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
+ b->c.level, b->c.btree_id);
BUG_ON(ret);
bch2_btree_set_root_inmem(c, b);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
}
ssize_t bch2_btree_updates_print(struct bch_fs *c, char *buf)
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index e00dc51ff3eb..4a5b9dcfbdd0 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -173,7 +173,7 @@ void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
struct btree *b)
{
- unsigned depth = btree_node_root(c, b)->level + 1;
+ unsigned depth = btree_node_root(c, b)->c.level + 1;
/*
* Number of nodes we might have to allocate in a worst case btree
@@ -181,9 +181,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c,
* a new root, unless we're already at max depth:
*/
if (depth < BTREE_MAX_DEPTH)
- return (depth - b->level) * 2 + 1;
+ return (depth - b->c.level) * 2 + 1;
else
- return (depth - b->level) * 2 - 1;
+ return (depth - b->c.level) * 2 - 1;
}
static inline void btree_node_reset_sib_u64s(struct btree *b)
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 283c10feb81f..1a1fd230e4b9 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -135,7 +135,7 @@ static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
btree_node_lock_type(c, b, SIX_LOCK_read);
bch2_btree_node_write_cond(c, b,
(btree_current_write(b) == w && w->journal.seq == seq));
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
}
static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 69b123bad83b..4e0d14e37287 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -52,8 +52,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
bkey_copy(&v->key, &b->key);
v->written = 0;
- v->level = b->level;
- v->btree_id = b->btree_id;
+ v->c.level = b->c.level;
+ v->c.btree_id = b->c.btree_id;
bch2_btree_keys_init(v, &c->expensive_debug_checks);
if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 1f26d9e19fe9..26e5767aa5de 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -188,7 +188,7 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *i
iter->b = b;
bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
bch2_journal_iter_init(&iter->journal, journal_keys,
- b->btree_id, b->level, b->data->min_key);
+ b->c.btree_id, b->c.level, b->data->min_key);
}
/* Walk btree, overlaying keys from the journal: */
@@ -206,11 +206,11 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b
bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- ret = key_fn(c, btree_id, b->level, k);
+ ret = key_fn(c, btree_id, b->c.level, k);
if (ret)
break;
- if (b->level) {
+ if (b->c.level) {
struct btree *child;
BKEY_PADDED(k) tmp;
@@ -219,9 +219,9 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b
bch2_btree_and_journal_iter_advance(&iter);
- if (b->level > 0) {
+ if (b->c.level > 0) {
child = bch2_btree_node_get_noiter(c, &tmp.k,
- b->btree_id, b->level - 1);
+ b->c.btree_id, b->c.level - 1);
ret = PTR_ERR_OR_ZERO(child);
if (ret)
break;
@@ -229,7 +229,7 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b
ret = (node_fn ? node_fn(c, b) : 0) ?:
bch2_btree_and_journal_walk_recurse(c, child,
journal_keys, btree_id, node_fn, key_fn);
- six_unlock_read(&child->lock);
+ six_unlock_read(&child->c.lock);
if (ret)
break;
@@ -253,12 +253,12 @@ int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_k
if (btree_node_fake(b))
return 0;
- six_lock_read(&b->lock, NULL, NULL);
+ six_lock_read(&b->c.lock, NULL, NULL);
ret = (node_fn ? node_fn(c, b) : 0) ?:
bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
node_fn, key_fn) ?:
- key_fn(c, btree_id, b->level + 1, bkey_i_to_s_c(&b->key));
- six_unlock_read(&b->lock);
+ key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
+ six_unlock_read(&b->c.lock);
return ret;
}
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 01a9cc736cab..bafbccafae30 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -144,8 +144,8 @@ DECLARE_EVENT_CLASS(btree_node,
TP_fast_assign(
memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
- __entry->level = b->level;
- __entry->id = b->btree_id;
+ __entry->level = b->c.level;
+ __entry->id = b->c.btree_id;
__entry->inode = b->key.k.p.inode;
__entry->offset = b->key.k.p.offset;
),
@@ -262,7 +262,7 @@ TRACE_EVENT(btree_insert_key,
),
TP_fast_assign(
- __entry->id = b->btree_id;
+ __entry->id = b->c.btree_id;
__entry->inode = k->k.p.inode;
__entry->offset = k->k.p.offset;
__entry->size = k->k.size;