Diffstat (limited to 'libbcachefs/bset.c')
-rw-r--r--  libbcachefs/bset.c  54
1 file changed, 11 insertions(+), 43 deletions(-)
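Read as a whole, the hunks below remove the read-only aux tree's side table of previous-key sizes: one u8 per eytzinger node, recording how many u64s the key before that node's key occupies. Its only substantive user was make_bfloat(), which took a node's left-hand bound from the key preceding an ancestor's key; it now uses the ancestor's key itself. That lets bkey_float_byte_offset(), ro_aux_tree_prev(), and tree_to_prev_bkey() go, shrinks the tree's buffer and grows its capacity, and drops the now-unused index arguments to bkey_mantissa() and bkey_mantissa_bits_dropped(). bch2_bset_insert() also drops its btree_node_iter argument.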
diff --git a/libbcachefs/bset.c b/libbcachefs/bset.c
index 575e1d0b..1b66b2c7 100644
--- a/libbcachefs/bset.c
+++ b/libbcachefs/bset.c
@@ -304,11 +304,6 @@ struct bkey_float {
};
#define BKEY_MANTISSA_BITS 16
-static unsigned bkey_float_byte_offset(unsigned idx)
-{
- return idx * sizeof(struct bkey_float);
-}
-
struct ro_aux_tree {
u8 nothing[0];
struct bkey_float f[];
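For orientation (not part of the patch): bkey_float_byte_offset() located the end of the bkey_float array inside the RO aux tree, i.e. the start of the per-node prev-size byte table this patch deletes. A minimal standalone sketch of the before/after layout arithmetic; ro_aux_tree_bytes() is a hypothetical helper, and the 4-byte bkey_float stand-in is an assumption based on BKEY_MANTISSA_BITS being 16:

	#include <stddef.h>
	#include <stdint.h>

	/* stand-in for the struct at the top of bset.c (exponent,
	 * key_offset, 16-bit mantissa) -- an assumption for this sketch */
	struct bkey_float { uint8_t exponent, key_offset; uint16_t mantissa; };

	/* hypothetical helper: aux-tree bytes for `size` eytzinger nodes */
	static size_t ro_aux_tree_bytes(size_t size, int with_prev_table)
	{
		size_t bytes = size * sizeof(struct bkey_float);

		/* the pre-patch layout appended one u8 per node: the u64
		 * count of the key *preceding* that node's key, which is
		 * what ro_aux_tree_prev() indexed into */
		if (with_prev_table)
			bytes += size * sizeof(uint8_t);

		return bytes;
	}

With that layout, bkey_float_byte_offset(t->size) was exactly the offset of the prev table, which is why both it and ro_aux_tree_prev() can go once nothing reads the table.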
@@ -328,8 +323,7 @@ static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
return t->aux_data_offset;
case BSET_RO_AUX_TREE:
return t->aux_data_offset +
- DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
- t->size * sizeof(u8), 8);
+ DIV_ROUND_UP(t->size * sizeof(struct bkey_float), 8);
case BSET_RW_AUX_TREE:
return t->aux_data_offset +
DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
@@ -360,14 +354,6 @@ static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
return __aux_tree_base(b, t);
}
-static u8 *ro_aux_tree_prev(const struct btree *b,
- const struct bset_tree *t)
-{
- EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
-
- return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
-}
-
static struct bkey_float *bkey_float(const struct btree *b,
const struct bset_tree *t,
unsigned idx)
@@ -479,15 +465,6 @@ static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
bkey_float(b, t, j)->key_offset);
}
-static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
- const struct bset_tree *t,
- unsigned j)
-{
- unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
-
- return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
-}
-
static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
const struct bset_tree *t)
{
@@ -585,8 +562,7 @@ static unsigned rw_aux_tree_bsearch(struct btree *b,
}
static inline unsigned bkey_mantissa(const struct bkey_packed *k,
- const struct bkey_float *f,
- unsigned idx)
+ const struct bkey_float *f)
{
u64 v;
@@ -617,7 +593,7 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
struct bkey_packed *m = tree_to_bkey(b, t, j);
struct bkey_packed *l = is_power_of_2(j)
? min_key
- : tree_to_prev_bkey(b, t, j >> ffs(j));
+ : tree_to_bkey(b, t, j >> ffs(j));
struct bkey_packed *r = is_power_of_2(j + 1)
? max_key
: tree_to_bkey(b, t, j >> (ffz(j) + 1));
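This is the hunk the rest of the patch falls out of: node j's left-hand bound used to be the key just before its nearest "went right here" ancestor's key (hence the prev table); now it is that ancestor's key itself, which still bounds from below every search that can descend into j's subtree, since such a search has already branched right at that ancestor. The ancestor indices come from bit tricks on the 1-based eytzinger index; a small standalone demo (illustration only, with userspace builtins standing in for the kernel's ffs()/ffz()) makes them concrete:

	#include <stdio.h>

	/* Illustration only: which nodes make_bfloat() picks as bounds for
	 * a 1-based eytzinger node j.  __builtin_ffs() is 1-based like the
	 * kernel's ffs(); __builtin_ctz(~j) matches the kernel's 0-based
	 * ffz(). */
	static int is_pow2(unsigned v)
	{
		return v && !(v & (v - 1));
	}

	int main(void)
	{
		for (unsigned j = 1; j <= 15; j++) {
			char l[16], r[16];

			if (is_pow2(j))		/* leftmost path: min_key */
				snprintf(l, sizeof(l), "min_key");
			else			/* last ancestor we went right at */
				snprintf(l, sizeof(l), "node %u", j >> __builtin_ffs(j));

			if (is_pow2(j + 1))	/* rightmost path: max_key */
				snprintf(r, sizeof(r), "max_key");
			else			/* last ancestor we went left at */
				snprintf(r, sizeof(r), "node %u", j >> (__builtin_ctz(~j) + 1));

			printf("node %2u: l = %-8s r = %s\n", j, l, r);
		}
		return 0;
	}

For example node 6 (binary 110) gets l = node 1 and r = node 3: it is the left child of 3 (so 3 bounds it from the right) and 3 is the right child of 1 (so 1 bounds it from the left).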
@@ -668,7 +644,7 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
f->exponent = shift;
- mantissa = bkey_mantissa(m, f, j);
+ mantissa = bkey_mantissa(m, f);
/*
* If we've got garbage bits, set them to all 1s - it's legal for the
@@ -690,8 +666,7 @@ static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
{
- return __bset_tree_capacity(b, t) /
- (sizeof(struct bkey_float) + sizeof(u8));
+ return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
}
static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
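This hunk and the bset_aux_tree_buf_end() hunk earlier are the two space-accounting halves of the same change: an RO tree node now costs sizeof(struct bkey_float) bytes rather than sizeof(struct bkey_float) + 1. Assuming the 4-byte bkey_float from the sketch above (an assumption), the same buffer holds about 25% more nodes:

	#include <stdio.h>

	/* assumed 4-byte stand-in, as in the earlier sketch */
	struct bkey_float_sz { unsigned char e, o; unsigned short m; };

	int main(void)
	{
		size_t buf_bytes = 512;	/* hypothetical aux-tree buffer */

		printf("old capacity: %zu nodes\n",	/* 512 / 5 = 102 */
		       buf_bytes / (sizeof(struct bkey_float_sz) + sizeof(unsigned char)));
		printf("new capacity: %zu nodes\n",	/* 512 / 4 = 128 */
		       buf_bytes / sizeof(struct bkey_float_sz));
		return 0;
	}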
@@ -720,7 +695,7 @@ static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
- struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
+ struct bkey_packed *k = btree_bkey_first(b, t);
struct bkey_i min_key, max_key;
unsigned cacheline = 1;
@@ -733,12 +708,12 @@ retry:
return;
}
- t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
+ t->extra = eytzinger1_extra(t->size - 1);
/* First we figure out where the first key in each cacheline is */
eytzinger1_for_each(j, t->size - 1) {
while (bkey_to_cacheline(b, t, k) < cacheline)
- prev = k, k = bkey_p_next(k);
+ k = bkey_p_next(k);
if (k >= btree_bkey_last(b, t)) {
/* XXX: this path sucks */
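The t->extra line in the hunk above looks like a pure refactor: substituting size = t->size - 1 into a definition of eytzinger1_extra() of the shape below reproduces the deleted open-coded expression exactly. The helper here is paraphrased from memory and should be checked against the tree's eytzinger.h; the runnable form uses a userspace stand-in for rounddown_pow_of_two():

	#include <stdio.h>

	/* userspace stand-in for the kernel's rounddown_pow_of_two() */
	static unsigned rounddown_pow_of_two_(unsigned n)
	{
		while (n & (n - 1))
			n &= n - 1;	/* clear low set bits until one remains */
		return n;
	}

	/* paraphrased from eytzinger.h (an assumption): twice the number of
	 * elements in the partial bottom level of a 1-based eytzinger array */
	static unsigned eytzinger1_extra_(unsigned size)
	{
		return (size + 1 - rounddown_pow_of_two_(size)) << 1;
	}

	int main(void)
	{
		/* with size == t->size - 1 this expands to
		 *   (t->size - rounddown_pow_of_two(t->size - 1)) << 1,
		 * i.e. exactly the expression the hunk deletes */
		for (unsigned tsize = 2; tsize <= 9; tsize++)
			printf("t->size %u -> extra %u\n",
			       tsize, eytzinger1_extra_(tsize - 1));
		return 0;
	}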
@@ -746,17 +721,12 @@ retry:
goto retry;
}
- ro_aux_tree_prev(b, t)[j] = prev->u64s;
bkey_float(b, t, j)->key_offset =
bkey_to_cacheline_offset(b, t, cacheline++, k);
- EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
EBUG_ON(tree_to_bkey(b, t, j) != k);
}
- while (k != btree_bkey_last(b, t))
- prev = k, k = bkey_p_next(k);
-
if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
bkey_init(&min_key.k);
min_key.k.p = b->data->min_key;
@@ -999,7 +969,6 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
}
void bch2_bset_insert(struct btree *b,
- struct btree_node_iter *iter,
struct bkey_packed *where,
struct bkey_i *insert,
unsigned clobber_u64s)
@@ -1098,8 +1067,7 @@ static inline void prefetch_four_cachelines(void *p)
}
static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
- const struct bkey_float *f,
- unsigned idx)
+ const struct bkey_float *f)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
@@ -1133,9 +1101,9 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
goto slowpath;
l = f->mantissa;
- r = bkey_mantissa(packed_search, f, n);
+ r = bkey_mantissa(packed_search, f);
- if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
+ if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f))
goto slowpath;
n = n * 2 + (l < r);
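For context on these last two hunks: bset_search_tree() is a branch-free eytzinger descent over the bkey_float mantissas. `n = n * 2 + (l < r)` picks the left or right child each step, and the slowpath guards the case where a mantissa tie is ambiguous because low key bits were dropped when the float was built, which is what bkey_mantissa_bits_dropped() detects (now without needing the node index, since the prev table is gone). The classic userspace form of that descent, on plain ints (illustration only, not the patch's code):

	#include <stdio.h>

	/* The same descent on a sorted array stored in 1-based eytzinger
	 * order (children of n are 2n and 2n+1).  After running off the
	 * bottom, the low bits of n record the path taken; stripping the
	 * trailing one-bits recovers the answer. */
	static unsigned eytzinger1_lower_bound(const int *tree, unsigned size, int x)
	{
		unsigned n = 1;

		while (n <= size)
			n = n * 2 + (tree[n] < x);	/* go right if key < x */

		return n >> __builtin_ffs(~n);		/* 0: every key < x */
	}

	int main(void)
	{
		/* {10,20,30,40,50,60,70} in eytzinger order; slot 0 unused */
		const int tree[] = { 0, 40, 20, 60, 10, 30, 50, 70 };

		printf("%u\n", eytzinger1_lower_bound(tree, 7, 35));	/* 1 (the 40) */
		printf("%u\n", eytzinger1_lower_bound(tree, 7, 75));	/* 0 (none)   */
		return 0;
	}

The real function descends on 16-bit mantissas instead of full keys, which is why an equal-mantissa comparison with dropped bits cannot be resolved in the fast path.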