-rw-r--r--  drivers/md/bcache/bset.c          209
-rw-r--r--  drivers/md/bcache/bset.h            6
-rw-r--r--  drivers/md/bcache/btree_cache.c     8
-rw-r--r--  drivers/md/bcache/btree_cache.h     5
-rw-r--r--  drivers/md/bcache/btree_gc.c        6
-rw-r--r--  drivers/md/bcache/btree_io.c       72
-rw-r--r--  drivers/md/bcache/btree_io.h        2
-rw-r--r--  drivers/md/bcache/btree_iter.c      6
-rw-r--r--  drivers/md/bcache/btree_types.h    45
-rw-r--r--  drivers/md/bcache/btree_update.c   20
-rw-r--r--  drivers/md/bcache/btree_update.h    7
-rw-r--r--  drivers/md/bcache/extents.c        22
-rw-r--r--  drivers/md/bcache/sysfs.c           2
13 files changed, 222 insertions, 188 deletions
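
The hunks below replace the struct bset *data pointer in struct bset_tree with a 16-bit data_offset counted in u64s from the node buffer at b->data, accessed through the new bset(b, t) and set_btree_bset() helpers, and they derive the node page order from the cache_set (btree_page_order(c)) rather than the per-node b->page_order field. A minimal standalone sketch of the offset-accessor pattern, using hypothetical names (not the kernel code):

#include <assert.h>
#include <stdint.h>

struct node    { uint64_t words[4096]; };   /* stands in for the node buffer at b->data */
struct set_ref { uint16_t data_offset; };   /* replaces the old "struct bset *data" */

static void *ref_to_set(struct node *n, const struct set_ref *r)
{
	return n->words + r->data_offset;                            /* cf. bset(b, t) */
}

static void set_ref(struct node *n, struct set_ref *r, void *set)
{
	r->data_offset = (uint16_t)((uint64_t *) set - n->words);    /* cf. set_btree_bset() */
	assert(ref_to_set(n, r) == set);
}

int main(void)
{
	struct node n;
	struct set_ref r;

	set_ref(&n, &r, &n.words[100]);
	assert(ref_to_set(&n, &r) == &n.words[100]);
	return 0;
}
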
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 6936efbc81fc..05d646787c88 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -25,8 +25,8 @@ struct bset_tree *bch_bkey_to_bset(struct btree *b, struct bkey_packed *k)
struct bset_tree *t;
for_each_bset(b, t)
- if (k >= t->data->start &&
- k < bset_bkey_last(t->data))
+ if (k >= bset(b, t)->start &&
+ k < bset_bkey_last(bset(b, t)))
return t;
BUG();
@@ -95,7 +95,7 @@ void bch_dump_btree_node(struct btree *b)
console_lock();
for_each_bset(b, t)
- bch_dump_bset(b, t->data, t - b->set);
+ bch_dump_bset(b, bset(b, t), t - b->set);
console_unlock();
}
@@ -114,7 +114,7 @@ void bch_dump_btree_node_iter(struct btree *b,
bch_bkey_to_text(buf, sizeof(buf), &uk);
printk(KERN_ERR "set %zu key %zi/%u: %s\n", t - b->set,
- k->_data - t->data->_data, t->data->u64s, buf);
+ k->_data - bset(b, t)->_data, bset(b, t)->u64s, buf);
}
}
@@ -141,8 +141,8 @@ void __bch_verify_btree_nr_keys(struct btree *b)
struct btree_nr_keys nr = { 0 };
for_each_bset(b, t)
- for (k = t->data->start;
- k != bset_bkey_last(t->data);
+ for (k = bset(b, t)->start;
+ k != bset_bkey_last(bset(b, t));
k = bkey_next(k))
if (!bkey_whiteout(k))
btree_keys_account_key_add(&nr, t - b->set, k);
@@ -176,6 +176,7 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter,
{
struct btree_node_iter_set *set;
struct bset_tree *t;
+ struct bset *i;
struct bkey_packed *k, *first;
BUG_ON(iter->used > MAX_BSETS);
@@ -185,10 +186,10 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter,
btree_node_iter_for_each(iter, set) {
k = __btree_node_offset_to_key(b, set->k);
- t = bch_bkey_to_bset(b, k);
+ i = bset(b, bch_bkey_to_bset(b, k));
BUG_ON(__btree_node_offset_to_key(b, set->end) !=
- bset_bkey_last(t->data));
+ bset_bkey_last(i));
BUG_ON(set + 1 < iter->data + iter->used &&
btree_node_iter_cmp(iter, b, set[0], set[1]) > 0);
@@ -196,12 +197,15 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter,
first = __btree_node_offset_to_key(b, iter->data[0].k);
- for_each_bset(b, t)
- if (bch_btree_node_iter_bset_pos(iter, b, t->data) ==
- bset_bkey_last(t->data) &&
- (k = bkey_prev_all(b, t, bset_bkey_last(t->data))))
+ for_each_bset(b, t) {
+ i = bset(b, t);
+
+ if (bch_btree_node_iter_bset_pos(iter, b, i) ==
+ bset_bkey_last(i) &&
+ (k = bkey_prev_all(b, t, bset_bkey_last(i))))
BUG_ON(__btree_node_iter_cmp(iter->is_extents, b,
k, first) > 0);
+ }
}
void bch_verify_key_order(struct btree *b,
@@ -226,20 +230,22 @@ void bch_verify_key_order(struct btree *b,
}
k = bkey_next(where);
- BUG_ON(k != bset_bkey_last(t->data) &&
+ BUG_ON(k != bset_bkey_last(bset(b, t)) &&
keys_out_of_order(b, where, k, iter->is_extents));
for_each_bset(b, t) {
- if (!t->data->u64s)
+ struct bset *i = bset(b, t);
+
+ if (!i->u64s)
continue;
- if (where >= t->data->start &&
- where < bset_bkey_last(t->data))
+ if (where >= i->start &&
+ where < bset_bkey_last(i))
continue;
- k = bch_btree_node_iter_bset_pos(iter, b, t->data);
+ k = bch_btree_node_iter_bset_pos(iter, b, i);
- if (k == bset_bkey_last(t->data))
+ if (k == bset_bkey_last(i))
k = bkey_prev_all(b, t, k);
while (bkey_cmp_left_packed_byval(b, k, bkey_start_pos(&uw)) > 0 &&
@@ -247,7 +253,7 @@ void bch_verify_key_order(struct btree *b,
k = prev;
for (;
- k != bset_bkey_last(t->data);
+ k != bset_bkey_last(i);
k = bkey_next(k)) {
uk = bkey_unpack_key(b, k);
@@ -460,7 +466,7 @@ void bch_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
b->expensive_debug_checks = expensive_debug_checks;
#endif
for (i = 0; i < MAX_BSETS; i++)
- b->set[i].data = NULL;
+ b->set[i].data_offset = U16_MAX;
bch_bset_set_no_aux_tree(b, b->set);
}
@@ -556,7 +562,7 @@ static inline unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
return j;
}
-static inline unsigned to_inorder(unsigned j, struct bset_tree *t)
+static inline unsigned to_inorder(unsigned j, const struct bset_tree *t)
{
return __to_inorder(j, t->size, t->extra);
}
@@ -577,7 +583,7 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
return j;
}
-static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
+static unsigned inorder_to_tree(unsigned j, const struct bset_tree *t)
{
return __inorder_to_tree(j, t->size, t->extra);
}
@@ -638,44 +644,51 @@ void inorder_test(void)
* of the previous key so we can walk backwards to it from t->tree[j]'s key.
*/
-static struct bkey_packed *cacheline_to_bkey(struct bset_tree *t,
+static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
+ const struct bset_tree *t,
unsigned cacheline,
int offset)
{
- return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
+ return ((void *) bset(b, t)) + cacheline * BSET_CACHELINE + offset * 8;
}
-static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey_packed *k)
+static unsigned bkey_to_cacheline(const struct btree *b,
+ const struct bset_tree *t,
+ const struct bkey_packed *k)
{
- return ((void *) k - (void *) t->data) / BSET_CACHELINE;
+ return ((void *) k - (void *) bset(b, t)) / BSET_CACHELINE;
}
-static ssize_t __bkey_to_cacheline_offset(struct bset_tree *t,
+static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
+ const struct bset_tree *t,
unsigned cacheline,
- struct bkey_packed *k)
+ const struct bkey_packed *k)
{
- return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
+ return (u64 *) k - (u64 *) cacheline_to_bkey(b, t, cacheline, 0);
}
-static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
+static unsigned bkey_to_cacheline_offset(const struct btree *b,
+ const struct bset_tree *t,
unsigned cacheline,
- struct bkey_packed *k)
+ const struct bkey_packed *k)
{
- size_t m = __bkey_to_cacheline_offset(t, cacheline, k);
+ size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);
EBUG_ON(m > BFLOAT_KEY_OFFSET_MAX);
return m;
}
static struct bkey_packed *tree_to_bkey(const struct btree *b,
- struct bset_tree *t, unsigned j)
+ const struct bset_tree *t,
+ unsigned j)
{
- return cacheline_to_bkey(t, to_inorder(j, t),
+ return cacheline_to_bkey(b, t, to_inorder(j, t),
bkey_float(b, t, j)->key_offset);
}
-static struct bkey_packed *tree_to_prev_bkey(struct btree *b,
- struct bset_tree *t, unsigned j)
+static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
+ const struct bset_tree *t,
+ unsigned j)
{
unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
@@ -698,7 +711,7 @@ static struct bkey_packed *table_to_bkey(const struct btree *b,
struct bset_tree *t,
unsigned cacheline)
{
- return cacheline_to_bkey(t, cacheline, rw_aux_tree(b, t)[cacheline]);
+ return cacheline_to_bkey(b, t, cacheline, rw_aux_tree(b, t)[cacheline]);
}
static inline unsigned bfloat_mantissa(const struct bkey_float *f,
@@ -746,16 +759,16 @@ static void make_bfloat(struct btree *b,
struct bkey_float *f = bkey_float(b, t, j);
struct bkey_packed *m = tree_to_bkey(b, t, j);
struct bkey_packed *p = tree_to_prev_bkey(b, t, j);
+ struct bset *i = bset(b, t);
unsigned bits = j < BFLOAT_32BIT_NR ? 32 : 16;
unsigned mantissa;
struct bkey_packed *l = is_power_of_2(j)
- ? t->data->start
+ ? i->start
: tree_to_prev_bkey(b, t, j >> ffs(j));
struct bkey_packed *r = is_power_of_2(j + 1)
- ? bset_bkey_idx(t->data,
- le16_to_cpu(t->data->u64s) - t->end.u64s)
+ ? bset_bkey_idx(i, le16_to_cpu(i->u64s) - t->end.u64s)
: tree_to_bkey(b, t, j >> (ffz(j) + 1));
int shift, exponent;
@@ -880,14 +893,14 @@ static void bch_bset_lookup_table_add_entries(struct btree *b,
BUG_ON(t->size > bset_rw_tree_capacity(b, t));
for (k = table_to_bkey(b, t, t->size - 1);
- k != bset_bkey_last(t->data);
+ k != bset_bkey_last(bset(b, t));
k = bkey_next(k))
- while (bkey_to_cacheline(t, k) >= t->size) {
+ while (bkey_to_cacheline(b, t, k) >= t->size) {
if (t->size == bset_rw_tree_capacity(b, t))
return;
rw_aux_tree(b, t)[t->size] =
- bkey_to_cacheline_offset(t, t->size, k);
+ bkey_to_cacheline_offset(b, t, t->size, k);
t->size++;
}
}
@@ -896,17 +909,18 @@ static void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
{
t->size = 1;
t->extra = BSET_RW_AUX_TREE_VAL;
- rw_aux_tree(b, t)[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
+ rw_aux_tree(b, t)[0] = bkey_to_cacheline_offset(b, t, 0, bset(b, t)->start);
bch_bset_lookup_table_add_entries(b, t);
}
static void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
- struct bkey_packed *prev = NULL, *k = t->data->start;
+ struct bset *i = bset(b, t);
+ struct bkey_packed *prev = NULL, *k = i->start;
unsigned j, cacheline = 1;
- t->size = min(bkey_to_cacheline(t, bset_bkey_last(t->data)),
+ t->size = min(bkey_to_cacheline(b, t, bset_bkey_last(i)),
bset_ro_tree_capacity(b, t));
retry:
if (t->size < 2) {
@@ -921,23 +935,23 @@ retry:
for (j = inorder_next(0, t->size);
j;
j = inorder_next(j, t->size)) {
- while (bkey_to_cacheline(t, k) < cacheline)
+ while (bkey_to_cacheline(b, t, k) < cacheline)
prev = k, k = bkey_next(k);
- if (k >= bset_bkey_last(t->data)) {
+ if (k >= bset_bkey_last(i)) {
t->size--;
goto retry;
}
ro_aux_tree_prev(b, t)[j] = prev->u64s;
bkey_float(b, t, j)->key_offset =
- bkey_to_cacheline_offset(t, cacheline++, k);
+ bkey_to_cacheline_offset(b, t, cacheline++, k);
BUG_ON(tree_to_prev_bkey(b, t, j) != prev);
BUG_ON(tree_to_bkey(b, t, j) != k);
}
- while (bkey_next(k) != bset_bkey_last(t->data))
+ while (bkey_next(k) != bset_bkey_last(i))
k = bkey_next(k);
t->end = *k;
@@ -993,7 +1007,7 @@ void bch_bset_init_first(struct btree *b, struct bset *i)
BUG_ON(b->nsets);
t = &b->set[b->nsets++];
- t->data = i;
+ set_btree_bset(b, t, i);
memset(i, 0, sizeof(*i));
get_random_bytes(&i->seq, sizeof(i->seq));
SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
@@ -1006,35 +1020,36 @@ void bch_bset_init_next(struct btree *b, struct bset *i)
BUG_ON(b->nsets >= MAX_BSETS);
t = &b->set[b->nsets++];
- t->data = i;
+ set_btree_bset(b, t, i);
memset(i, 0, sizeof(*i));
- i->seq = b->set->data->seq;
+ i->seq = btree_bset_first(b)->seq;
SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
}
static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
struct bkey_packed *k)
{
+ struct bset *i = bset(b, t);
struct bkey_packed *p;
int j;
- EBUG_ON(k < t->data->start || k > bset_bkey_last(t->data));
+ EBUG_ON(k < i->start || k > bset_bkey_last(i));
- if (k == t->data->start)
+ if (k == i->start)
return NULL;
- j = min_t(unsigned, t->size, bkey_to_cacheline(t, k));
+ j = min_t(unsigned, t->size, bkey_to_cacheline(b, t, k));
do {
if (--j <= 0) {
- p = t->data->start;
+ p = i->start;
break;
}
switch (bset_aux_tree_type(t)) {
case BSET_NO_AUX_TREE:
- p = t->data->start;
+ p = i->start;
break;
case BSET_RO_AUX_TREE:
p = tree_to_bkey(b, t, inorder_to_tree(j, t));
@@ -1099,13 +1114,13 @@ void bch_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
if (bset_aux_tree_type(t) != BSET_RO_AUX_TREE)
return;
- inorder = bkey_to_cacheline(t, k);
+ inorder = bkey_to_cacheline(b, t, k);
- if (k == t->data->start)
+ if (k == bset(b, t)->start)
for (j = 1; j < t->size; j = j * 2)
make_bfloat(b, t, j);
- if (bkey_next(k) == bset_bkey_last(t->data)) {
+ if (bkey_next(k) == bset_bkey_last(bset(b, t))) {
t->end = *k;
for (j = 1; j < t->size; j = j * 2 + 1)
@@ -1155,15 +1170,15 @@ static void bch_bset_fix_lookup_table(struct btree *b,
return;
/* Did we just truncate? */
- if (where == bset_bkey_last(t->data)) {
+ if (where == bset_bkey_last(bset(b, t))) {
while (t->size > 1 &&
- table_to_bkey(b, t, t->size - 1) >= bset_bkey_last(t->data))
+ table_to_bkey(b, t, t->size - 1) >= bset_bkey_last(bset(b, t)))
t->size--;
goto verify;
}
/* Find first entry in the lookup table strictly greater than where: */
- j = bkey_to_cacheline(t, where);
+ j = bkey_to_cacheline(b, t, where);
while (j < t->size && table_to_bkey(b, t, j) <= where)
j++;
@@ -1178,23 +1193,23 @@ static void bch_bset_fix_lookup_table(struct btree *b,
if (table_to_bkey(b, t, j) <
(struct bkey_packed *) ((u64 *) where + clobber_u64s))
- new_offset = __bkey_to_cacheline_offset(t, j, where);
+ new_offset = __bkey_to_cacheline_offset(b, t, j, where);
else
new_offset = (int) rw_aux_tree(b, t)[j] + shift;
if (new_offset > 7) {
k = table_to_bkey(b, t, j - 1);
- new_offset = __bkey_to_cacheline_offset(t, j, k);
+ new_offset = __bkey_to_cacheline_offset(b, t, j, k);
}
while (new_offset < 0) {
- k = bkey_next(cacheline_to_bkey(t, j, new_offset));
- if (k == bset_bkey_last(t->data)) {
+ k = bkey_next(cacheline_to_bkey(b, t, j, new_offset));
+ if (k == bset_bkey_last(bset(b, t))) {
t->size = j;
goto verify;
}
- new_offset = __bkey_to_cacheline_offset(t, j, k);
+ new_offset = __bkey_to_cacheline_offset(b, t, j, k);
}
BUG_ON(new_offset > U8_MAX);
@@ -1221,15 +1236,15 @@ static void bch_bset_verify_lookup_table(struct btree *b,
return;
BUG_ON(t->size < 1);
- BUG_ON(table_to_bkey(b, t, 0) != t->data->start);
+ BUG_ON(table_to_bkey(b, t, 0) != bset(b, t)->start);
- if (!t->data->u64s) {
+ if (!bset(b, t)->u64s) {
BUG_ON(t->size != 1);
return;
}
- for (k = t->data->start;
- k != bset_bkey_last(t->data);
+ for (k = bset(b, t)->start;
+ k != bset_bkey_last(bset(b, t));
k = bkey_next(k))
while (k == table_to_bkey(b, t, j))
if (++j == t->size)
@@ -1246,7 +1261,7 @@ void bch_bset_insert(struct btree *b,
{
struct bkey_format *f = &b->format;
struct bset_tree *t = bset_tree_last(b);
- struct bset *i = t->data;
+ struct bset *i = bset(b, t);
struct bkey_packed packed, *src = bkey_to_packed(insert);
if (bkey_pack_key(&packed, &insert->k, f))
@@ -1280,7 +1295,7 @@ void bch_bset_delete(struct btree *b,
unsigned clobber_u64s)
{
struct bset_tree *t = bset_tree_last(b);
- struct bset *i = t->data;
+ struct bset *i = bset(b, t);
u64 *src_p = where->_data + clobber_u64s;
u64 *dst_p = where->_data;
@@ -1341,7 +1356,7 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
prefetch(p);
} else if (n << 3 < t->size) {
inorder = to_inorder(n, t);
- p = cacheline_to_bkey(t, inorder, 0);
+ p = cacheline_to_bkey(b, t, inorder, 0);
#ifdef CONFIG_X86_64
asm(".intel_syntax noprefix;"
"prefetcht0 [%0 - 127 + 64 * 0];"
@@ -1378,14 +1393,14 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
* we recursed left or recursed right.
*/
if (n & 1) {
- return cacheline_to_bkey(t, inorder, f->key_offset);
+ return cacheline_to_bkey(b, t, inorder, f->key_offset);
} else {
if (--inorder) {
n = inorder_prev(n >> 1, t->size);
f = bkey_float_get(base, n);
- return cacheline_to_bkey(t, inorder, f->key_offset);
+ return cacheline_to_bkey(b, t, inorder, f->key_offset);
} else
- return t->data->start;
+ return bset(b, t)->start;
}
}
@@ -1419,7 +1434,7 @@ static struct bkey_packed *bch_bset_search(struct btree *b,
switch (bset_aux_tree_type(t)) {
case BSET_NO_AUX_TREE:
- m = t->data->start;
+ m = bset(b, t)->start;
break;
case BSET_RW_AUX_TREE:
m = bset_search_write_set(b, t, search, lossy_packed_search);
@@ -1434,24 +1449,24 @@ static struct bkey_packed *bch_bset_search(struct btree *b,
if (unlikely(bkey_cmp_p_or_unp(b, &t->end,
packed_search, &search) < 0))
- return bset_bkey_last(t->data);
+ return bset_bkey_last(bset(b, t));
- if (unlikely(bkey_cmp_p_or_unp(b, t->data->start,
+ if (unlikely(bkey_cmp_p_or_unp(b, bset(b, t)->start,
packed_search, &search) >= 0))
- m = t->data->start;
+ m = bset(b, t)->start;
else
m = bset_search_tree(b, t, search, lossy_packed_search);
break;
}
if (lossy_packed_search)
- while (m != bset_bkey_last(t->data) &&
+ while (m != bset_bkey_last(bset(b, t)) &&
!btree_iter_pos_cmp_p_or_unp(b, search, lossy_packed_search,
m, strictly_greater))
m = bkey_next(m);
if (!packed_search)
- while (m != bset_bkey_last(t->data) &&
+ while (m != bset_bkey_last(bset(b, t)) &&
!btree_iter_pos_cmp_packed(b, &search, m, strictly_greater))
m = bkey_next(m);
@@ -1506,7 +1521,7 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
__bch_btree_node_iter_push(iter, b,
bch_bset_search(b, t, search, NULL, NULL,
strictly_greater),
- bset_bkey_last(t->data));
+ bset_bkey_last(bset(b, t)));
bch_btree_node_iter_sort(iter, b);
}
@@ -1582,7 +1597,7 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter,
bch_bset_search(b, t, search,
packed_search, &p,
strictly_greater),
- bset_bkey_last(t->data));
+ bset_bkey_last(bset(b, t)));
bch_btree_node_iter_sort(iter, b);
}
@@ -1597,8 +1612,8 @@ void bch_btree_node_iter_init_from_start(struct btree_node_iter *iter,
for_each_bset(b, t)
__bch_btree_node_iter_push(iter, b,
- t->data->start,
- bset_bkey_last(t->data));
+ bset(b, t)->start,
+ bset_bkey_last(bset(b, t)));
bch_btree_node_iter_sort(iter, b);
}
@@ -1701,12 +1716,12 @@ struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *iter,
for_each_bset(b, t) {
k = bkey_prev_all(b, t,
- bch_btree_node_iter_bset_pos(iter, b, t->data));
+ bch_btree_node_iter_bset_pos(iter, b, bset(b, t)));
if (k &&
(!prev || __btree_node_iter_cmp(iter->is_extents, b,
k, prev) > 0)) {
prev = k;
- prev_i = t->data;
+ prev_i = bset(b, t);
}
}
@@ -1770,7 +1785,8 @@ void bch_btree_keys_stats(struct btree *b, struct bset_stats *stats)
size_t j;
stats->sets[type].nr++;
- stats->sets[type].bytes += le16_to_cpu(t->data->u64s) * sizeof(u64);
+ stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
+ sizeof(u64);
if (bset_has_ro_aux_tree(t)) {
stats->floats += t->size - 1;
@@ -1806,7 +1822,7 @@ int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
if (!bset_has_ro_aux_tree(t))
goto out;
- j = inorder_to_tree(bkey_to_cacheline(t, k), t);
+ j = inorder_to_tree(bkey_to_cacheline(b, t, k), t);
if (j &&
j < t->size &&
k == tree_to_bkey(b, t, j))
@@ -1821,11 +1837,12 @@ int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
case BFLOAT_FAILED_PREV:
p = tree_to_prev_bkey(b, t, j);
l = is_power_of_2(j)
- ? t->data->start
+ ? bset(b, t)->start
: tree_to_prev_bkey(b, t, j >> ffs(j));
r = is_power_of_2(j + 1)
- ? bset_bkey_idx(t->data,
- le16_to_cpu(t->data->u64s) - t->end.u64s)
+ ? bset_bkey_idx(bset(b, t),
+ le16_to_cpu(bset(b, t)->u64s) -
+ t->end.u64s)
: tree_to_bkey(b, t, j >> (ffz(j) + 1));
up = bkey_unpack_key(b, p);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 51043c969c9a..137858e32c3a 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -295,7 +295,7 @@ static inline void btree_node_set_format(struct btree *b,
static inline struct bset *bset_next_set(struct btree *b,
unsigned block_bytes)
{
- struct bset *i = bset_tree_last(b)->data;
+ struct bset *i = btree_bset_last(b);
EBUG_ON(!is_power_of_2(block_bytes));
@@ -454,7 +454,7 @@ static inline bool bch_btree_node_iter_end(struct btree_node_iter *iter)
static inline u16
__btree_node_key_to_offset(struct btree *b, const struct bkey_packed *k)
{
- size_t ret = (u64 *) k - (u64 *) b->set->data;
+ size_t ret = (u64 *) k - (u64 *) b->data - 1;
EBUG_ON(ret > U16_MAX);
return ret;
@@ -463,7 +463,7 @@ __btree_node_key_to_offset(struct btree *b, const struct bkey_packed *k)
static inline struct bkey_packed *
__btree_node_offset_to_key(struct btree *b, u16 k)
{
- return (void *) ((u64 *) b->set->data + k);
+ return (void *) ((u64 *) b->data + k + 1);
}
static inline int __btree_node_iter_cmp(bool is_extents,
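
In bset.h, node-iterator key offsets are now measured in u64s from b->data rather than from the first bset, with a one-u64 bias copied verbatim from the hunk above. A small sketch, with hypothetical names, that only checks that the two conversions invert each other:

#include <assert.h>
#include <stdint.h>

static uint16_t key_to_offset(const uint64_t *node, const uint64_t *key)
{
	return (uint16_t)(key - node - 1);      /* bias as in __btree_node_key_to_offset() */
}

static const uint64_t *offset_to_key(const uint64_t *node, uint16_t offset)
{
	return node + offset + 1;               /* bias as in __btree_node_offset_to_key() */
}

int main(void)
{
	uint64_t node[256];
	const uint64_t *k = &node[42];

	assert(offset_to_key(node, key_to_offset(node, k)) == k);
	return 0;
}
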
diff --git a/drivers/md/bcache/btree_cache.c b/drivers/md/bcache/btree_cache.c
index 98501ccb5505..9df50b70078d 100644
--- a/drivers/md/bcache/btree_cache.c
+++ b/drivers/md/bcache/btree_cache.c
@@ -35,18 +35,18 @@ void bch_recalc_btree_reserve(struct cache_set *c)
#define mca_can_free(c) \
max_t(int, 0, c->btree_cache_used - c->btree_cache_reserve)
-static void __mca_data_free(struct btree *b)
+static void __mca_data_free(struct cache_set *c, struct btree *b)
{
EBUG_ON(btree_node_write_in_flight(b));
- free_pages((unsigned long) b->data, b->page_order);
+ free_pages((unsigned long) b->data, btree_page_order(c));
b->data = NULL;
bch_btree_keys_free(b);
}
static void mca_data_free(struct cache_set *c, struct btree *b)
{
- __mca_data_free(b);
+ __mca_data_free(c, b);
c->btree_cache_used--;
list_move(&b->list, &c->btree_cache_freed);
}
@@ -100,7 +100,6 @@ void mca_hash_remove(struct cache_set *c, struct btree *b)
BUG_ON(btree_node_dirty(b));
b->nsets = 0;
- b->set[0].data = NULL;
rhashtable_remove_fast(&c->btree_cache_table, &b->hash,
bch_btree_cache_params);
@@ -516,7 +515,6 @@ out:
b->flags = 0;
b->written = 0;
b->nsets = 0;
- b->set[0].data = NULL;
b->sib_u64s[0] = 0;
b->sib_u64s[1] = 0;
b->whiteout_u64s = 0;
diff --git a/drivers/md/bcache/btree_cache.h b/drivers/md/bcache/btree_cache.h
index 1939a3d3a785..e745abbe9219 100644
--- a/drivers/md/bcache/btree_cache.h
+++ b/drivers/md/bcache/btree_cache.h
@@ -46,6 +46,11 @@ static inline size_t btree_pages(struct cache_set *c)
return c->sb.btree_node_size >> (PAGE_SHIFT - 9);
}
+static inline size_t btree_page_order(struct cache_set *c)
+{
+ return ilog2(btree_pages(c));
+}
+
static inline unsigned btree_blocks(struct cache_set *c)
{
return c->sb.btree_node_size >> c->block_bits;
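
The new btree_page_order() helper recomputes the node's page order from the cache_set geometry, which is what lets later hunks drop uses of the per-node b->page_order (free_pages() in btree_cache.c, the sort buffer order in btree_io.c, and the sysfs accounting). A sketch of the size identities this relies on, assuming the node size is a power-of-two number of pages and using made-up constants:

#include <assert.h>

#define PAGE_SHIFT	12u
#define PAGE_SIZE	(1u << PAGE_SHIFT)

static unsigned ilog2_u(unsigned v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned btree_node_sectors = 512;                          /* e.g. a 256 KiB node */
	unsigned btree_bytes = btree_node_sectors << 9;
	unsigned btree_pages = btree_node_sectors >> (PAGE_SHIFT - 9);
	unsigned page_order  = ilog2_u(btree_pages);

	assert(btree_bytes == btree_pages * PAGE_SIZE);
	assert(btree_bytes == (PAGE_SIZE << page_order));
	return 0;
}
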
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 29ce6fe2e238..0a1dcc39eea3 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -440,8 +440,8 @@ static void recalc_packed_keys(struct btree *b)
BUG_ON(b->nsets != 1);
- for (k = b->set[0].data->start;
- k != bset_bkey_last(b->set[0].data);
+ for (k = bset(b, &b->set[0])->start;
+ k != bset_bkey_last(bset(b, &b->set[0]));
k = bkey_next(k))
btree_keys_account_key_add(&b->nr, 0, k);
}
@@ -503,7 +503,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
/* Check if repacking would make any nodes too big to fit */
for (i = 0; i < nr_old_nodes; i++)
- if (!bch_btree_node_format_fits(old_nodes[i], &new_format)) {
+ if (!bch_btree_node_format_fits(c, old_nodes[i], &new_format)) {
trace_bcache_btree_gc_coalesce_fail(c,
BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
goto out;
diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c
index 84e5713ad8a7..39e3271269ec 100644
--- a/drivers/md/bcache/btree_io.c
+++ b/drivers/md/bcache/btree_io.c
@@ -267,17 +267,17 @@ static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
enum compact_mode mode)
{
unsigned live_u64s = b->nr.bset_u64s[t - b->set];
- unsigned bset_u64s = le16_to_cpu(t->data->u64s);
+ unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
if (live_u64s == bset_u64s)
return 0;
if (mode == COMPACT_LAZY) {
if (live_u64s * 4 < bset_u64s * 3 ||
- (compacting && bset_unwritten(b, t->data)))
+ (compacting && bset_unwritten(b, bset(b, t))))
return bset_u64s - live_u64s;
} else {
- if (bset_written(b, t->data))
+ if (bset_written(b, bset(b, t)))
return bset_u64s - live_u64s;
}
@@ -317,14 +317,14 @@ bool __bch_compact_whiteouts(struct cache_set *c, struct btree *b,
sort_iter_add(&sort_iter, u_start, u_pos);
for_each_bset(b, t) {
- struct bset *i = t->data;
+ struct bset *i = bset(b, t);
struct bkey_packed *k, *n, *out, *start, *end;
struct btree_node_entry *src = NULL, *dst = NULL;
if (t != b->set && bset_unwritten(b, i)) {
src = container_of(i, struct btree_node_entry, keys);
dst = max(write_block(b),
- (void *) bset_bkey_last(t[-1].data));
+ (void *) bset_bkey_last(bset(b, t - 1)));
}
if (!should_compact_bset(b, t, compacting, mode)) {
@@ -332,7 +332,8 @@ bool __bch_compact_whiteouts(struct cache_set *c, struct btree *b,
memmove(dst, src, sizeof(*src) +
le16_to_cpu(src->keys.u64s) *
sizeof(u64));
- t->data = &dst->keys;
+ i = &dst->keys;
+ set_btree_bset(b, t, i);
}
continue;
}
@@ -343,12 +344,9 @@ bool __bch_compact_whiteouts(struct cache_set *c, struct btree *b,
end = bset_bkey_last(i);
if (src != dst) {
- src = container_of(i, struct btree_node_entry, keys);
- dst = max(write_block(b),
- (void *) bset_bkey_last(t[-1].data));
-
memmove(dst, src, sizeof(*src));
- i = t->data = &dst->keys;
+ i = &dst->keys;
+ set_btree_bset(b, t, i);
}
out = i->start;
@@ -424,7 +422,7 @@ static bool bch_drop_whiteouts(struct btree *b)
bool ret = false;
for_each_bset(b, t) {
- struct bset *i = t->data;
+ struct bset *i = bset(b, t);
struct bkey_packed *k, *n, *out, *start, *end;
if (!should_compact_bset(b, t, true, true))
@@ -437,10 +435,11 @@ static bool bch_drop_whiteouts(struct btree *b)
t != b->set) {
struct bset *dst =
max_t(struct bset *, write_block(b),
- (void *) bset_bkey_last(t[-1].data));
+ (void *) bset_bkey_last(bset(b, t - 1)));
memmove(dst, i, sizeof(struct bset));
- i = t->data = dst;
+ i = dst;
+ set_btree_bset(b, t, i);
}
out = i->start;
@@ -554,6 +553,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
struct btree_node *out;
struct sort_iter sort_iter;
struct bset_tree *t;
+ struct bset *start_bset = bset(b, &b->set[start_idx]);
bool used_mempool = false;
u64 start_time;
unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
@@ -565,13 +565,13 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
for (t = b->set + start_idx;
t < b->set + end_idx;
t++) {
- u64s += le16_to_cpu(t->data->u64s);
- sort_iter_add(&sort_iter, t->data->start,
- bset_bkey_last(t->data));
+ u64s += le16_to_cpu(bset(b, t)->u64s);
+ sort_iter_add(&sort_iter, bset(b, t)->start,
+ bset_bkey_last(bset(b, t)));
}
order = sorting_entire_node
- ? b->page_order
+ ? btree_page_order(c)
: get_order(__set_bytes(b->data, u64s));
out = btree_bounce_alloc(c, order, &used_mempool);
@@ -579,7 +579,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
start_time = local_clock();
if (btree_node_is_extents(b))
- filter_whiteouts = bset_written(b, b->set[start_idx].data);
+ filter_whiteouts = bset_written(b, start_bset);
u64s = btree_node_is_extents(b)
? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
@@ -597,14 +597,14 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
for (t = b->set + start_idx + 1;
t < b->set + end_idx;
t++)
- b->set[start_idx].data->journal_seq =
- max(b->set[start_idx].data->journal_seq,
- t->data->journal_seq);
+ start_bset->journal_seq =
+ max(start_bset->journal_seq,
+ bset(b, t)->journal_seq);
if (sorting_entire_node) {
unsigned u64s = le16_to_cpu(out->keys.u64s);
- BUG_ON(order != b->page_order);
+ BUG_ON(order != btree_page_order(c));
/*
* Our temporary buffer is the same size as the btree node's
@@ -614,10 +614,10 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
*out = *b->data;
out->keys.u64s = cpu_to_le16(u64s);
swap(out, b->data);
- b->set->data = &b->data->keys;
+ set_btree_bset(b, b->set, &b->data->keys);
} else {
- b->set[start_idx].data->u64s = out->keys.u64s;
- memcpy_u64s(b->set[start_idx].data->start,
+ start_bset->u64s = out->keys.u64s;
+ memcpy_u64s(start_bset->start,
out->keys.start,
le16_to_cpu(out->keys.u64s));
}
@@ -755,14 +755,14 @@ void bch_btree_sort_into(struct cache_set *c,
if (btree_node_ops(src)->key_normalize ||
btree_node_ops(src)->key_merge)
- nr = sort_repack_merge(c, dst->set->data,
+ nr = sort_repack_merge(c, btree_bset_first(dst),
src, &src_iter,
&dst->format,
true,
btree_node_ops(src)->key_normalize,
btree_node_ops(src)->key_merge);
else
- nr = sort_repack(dst->set->data,
+ nr = sort_repack(btree_bset_first(dst),
src, &src_iter,
&dst->format,
true);
@@ -792,7 +792,7 @@ static bool btree_node_compact(struct cache_set *c, struct btree *b,
for (unwritten_idx = 0;
unwritten_idx < b->nsets;
unwritten_idx++)
- if (bset_unwritten(b, b->set[unwritten_idx].data))
+ if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
break;
if (b->nsets - unwritten_idx > 1) {
@@ -815,7 +815,7 @@ void bch_btree_build_aux_trees(struct btree *b)
for_each_bset(b, t)
bch_bset_build_aux_tree(b, t,
- bset_unwritten(b, t->data) &&
+ bset_unwritten(b, bset(b, t)) &&
t == bset_tree_last(b));
}
@@ -1040,7 +1040,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
if (err)
goto err;
- b->set->data = &b->data->keys;
+ set_btree_bset(b, b->set, &b->data->keys);
btree_node_set_format(b, b->data->format);
} else {
@@ -1103,7 +1103,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
*sorted = *b->data;
sorted->keys.u64s = cpu_to_le16(u64s);
swap(sorted, b->data);
- b->set->data = &b->data->keys;
+ set_btree_bset(b, b->set, &b->data->keys);
b->nsets = 1;
BUG_ON(b->nr.live_u64s != u64s);
@@ -1112,7 +1112,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
bch_bset_build_aux_tree(b, b->set, false);
- set_needs_whiteout(b->set->data);
+ set_needs_whiteout(btree_bset_first(b));
btree_node_reset_sib_u64s(b);
@@ -1353,7 +1353,7 @@ void __bch_btree_node_write(struct cache_set *c, struct btree *b,
bytes += b->whiteout_u64s * sizeof(u64);
for_each_bset(b, t) {
- i = t->data;
+ i = bset(b, t);
if (bset_written(b, i))
continue;
@@ -1535,7 +1535,7 @@ bool bch_btree_post_write_cleanup(struct cache_set *c, struct btree *b)
}
for_each_bset(b, t)
- set_needs_whiteout(t->data);
+ set_needs_whiteout(bset(b, t));
bch_btree_verify(c, b);
@@ -1660,7 +1660,7 @@ void bch_btree_node_flush_journal_entries(struct cache_set *c,
* need to loop:
*/
while (i--) {
- u64 seq = le64_to_cpu(b->set[i].data->journal_seq);
+ u64 seq = le64_to_cpu(bset(b, &b->set[i])->journal_seq);
if (seq) {
bch_journal_flush_seq_async(&c->journal, seq, cl);
diff --git a/drivers/md/bcache/btree_io.h b/drivers/md/bcache/btree_io.h
index 3d95296a9cb0..866cc6c3008c 100644
--- a/drivers/md/bcache/btree_io.h
+++ b/drivers/md/bcache/btree_io.h
@@ -33,7 +33,7 @@ static inline bool bch_maybe_compact_whiteouts(struct cache_set *c, struct btree
for_each_bset(b, t) {
unsigned live_u64s = b->nr.bset_u64s[t - b->set];
- unsigned bset_u64s = le16_to_cpu(t->data->u64s);
+ unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
if (live_u64s * 4 < bset_u64s * 3)
goto compact;
diff --git a/drivers/md/bcache/btree_iter.c b/drivers/md/bcache/btree_iter.c
index 5f71058c0532..684c0e89622e 100644
--- a/drivers/md/bcache/btree_iter.c
+++ b/drivers/md/bcache/btree_iter.c
@@ -343,7 +343,7 @@ static void __bch_btree_node_iter_fix(struct btree_iter *iter,
unsigned clobber_u64s,
unsigned new_u64s)
{
- const struct bkey_packed *end = bset_bkey_last(t->data);
+ const struct bkey_packed *end = bset_bkey_last(bset(b, t));
struct btree_node_iter_set *set;
unsigned offset = __btree_node_key_to_offset(b, where);
int shift = new_u64s - clobber_u64s;
@@ -414,7 +414,7 @@ found:
k = bkey_prev_all(b, t,
bch_btree_node_iter_bset_pos(node_iter,
- b, t->data));
+ b, bset(b, t)));
if (k &&
__btree_node_iter_cmp(node_iter, b,
k, where) > 0) {
@@ -430,7 +430,7 @@ found:
}
bch_btree_node_iter_push(node_iter, b, k,
- bset_bkey_last(t->data));
+ bset_bkey_last(bset(b, t)));
}
next_bset:
t = t;
diff --git a/drivers/md/bcache/btree_types.h b/drivers/md/bcache/btree_types.h
index a8dd798e08fb..d7cca42f45df 100644
--- a/drivers/md/bcache/btree_types.h
+++ b/drivers/md/bcache/btree_types.h
@@ -45,13 +45,11 @@ struct bset_tree {
/* function of size - precalculated for to_inorder() */
u16 extra;
+ u16 data_offset;
u16 aux_data_offset;
/* copy of the last key in the set */
struct bkey_packed end;
-
- /* The actual btree node, with pointers to each sorted set */
- struct bset *data;
};
struct btree_write {
@@ -72,16 +70,8 @@ struct btree {
u16 written;
u8 level;
u8 btree_id;
- u16 sib_u64s[2];
- u16 whiteout_u64s;
- u16 uncompacted_whiteout_u64s;
-
u8 nsets;
- u8 page_order;
u8 nr_key_bits;
- u8 unpack_fn_len;
-
- struct btree_nr_keys nr;
struct bkey_format format;
@@ -97,6 +87,13 @@ struct btree {
*/
struct bset_tree set[MAX_BSETS];
+ struct btree_nr_keys nr;
+ u16 sib_u64s[2];
+ u16 whiteout_u64s;
+ u16 uncompacted_whiteout_u64s;
+ u8 page_order;
+ u8 unpack_fn_len;
+
/*
* XXX: add a delete sequence number, so when btree_node_relock() fails
* because the lock sequence number has changed - i.e. the contents were
@@ -165,20 +162,34 @@ static inline struct btree_write *btree_prev_write(struct btree *b)
return b->writes + (btree_node_write_idx(b) ^ 1);
}
-static inline struct bset *btree_bset_first(struct btree *b)
-{
- return b->set->data;
-}
-
static inline struct bset_tree *bset_tree_last(struct btree *b)
{
EBUG_ON(!b->nsets);
return b->set + b->nsets - 1;
}
+static inline struct bset *bset(const struct btree *b,
+ const struct bset_tree *t)
+{
+ return (void *) b->data + t->data_offset * sizeof(u64);
+}
+
+static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
+ const struct bset *i)
+{
+ t->data_offset = (u64 *) i - (u64 *) b->data;
+
+ EBUG_ON(bset(b, t) != i);
+}
+
+static inline struct bset *btree_bset_first(struct btree *b)
+{
+ return bset(b, b->set);
+}
+
static inline struct bset *btree_bset_last(struct btree *b)
{
- return bset_tree_last(b)->data;
+ return bset(b, bset_tree_last(b));
}
static inline unsigned bset_byte_offset(struct btree *b, void *i)
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index 246568b53056..f91b388b1f72 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -31,8 +31,8 @@ void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b)
struct bkey uk;
for_each_bset(b, t)
- for (k = t->data->start;
- k != bset_bkey_last(t->data);
+ for (k = bset(b, t)->start;
+ k != bset_bkey_last(bset(b, t));
k = bkey_next(k))
if (!bkey_whiteout(k)) {
uk = bkey_unpack_key(b, k);
@@ -73,12 +73,12 @@ static size_t btree_node_u64s_with_format(struct btree *b,
* This assumes all keys can pack with the new format -- it just checks if
* the re-packed keys would fit inside the node itself.
*/
-bool bch_btree_node_format_fits(struct btree *b, struct bkey_format *new_f)
+bool bch_btree_node_format_fits(struct cache_set *c, struct btree *b,
+ struct bkey_format *new_f)
{
size_t u64s = btree_node_u64s_with_format(b, new_f);
- return __set_bytes(b->data, u64s) <
- PAGE_SIZE << b->page_order;
+ return __set_bytes(b->data, u64s) < btree_bytes(c);
}
/* Btree node freeing/allocation: */
@@ -342,7 +342,7 @@ struct btree *btree_node_alloc_replacement(struct cache_set *c,
* The keys might expand with the new format - if they wouldn't fit in
* the btree node anymore, use the old format for now:
*/
- if (!bch_btree_node_format_fits(b, &new_f))
+ if (!bch_btree_node_format_fits(c, b, &new_f))
new_f = b->format;
return __btree_node_alloc_replacement(c, b, new_f, reserve);
@@ -681,7 +681,7 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter,
t = bch_bkey_to_bset(b, k);
- if (bset_unwritten(b, t->data) &&
+ if (bset_unwritten(b, bset(b, t)) &&
bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) {
BUG_ON(bkey_whiteout(k) != bkey_whiteout(&insert->k));
@@ -734,7 +734,7 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter,
}
t = bset_tree_last(b);
- k = bch_btree_node_iter_bset_pos(node_iter, b, t->data);
+ k = bch_btree_node_iter_bset_pos(node_iter, b, bset(b, t));
clobber_u64s = 0;
overwrite:
bch_bset_insert(b, node_iter, k, insert, clobber_u64s);
@@ -1094,7 +1094,7 @@ void bch_btree_interior_update_will_free_node(struct cache_set *c,
* in with keys that aren't in the journal anymore:
*/
for_each_bset(b, t)
- as->journal_seq = max(as->journal_seq, t->data->journal_seq);
+ as->journal_seq = max(as->journal_seq, bset(b, t)->journal_seq);
/*
* Does this node have unwritten data that has a pin on the journal?
@@ -1384,7 +1384,7 @@ static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b,
p = bkey_next(p);
BUG_ON(b->nsets != 1 ||
- b->nr.live_u64s != le16_to_cpu(b->set->data->u64s));
+ b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
btree_node_interior_verify(b);
}
diff --git a/drivers/md/bcache/btree_update.h b/drivers/md/bcache/btree_update.h
index 2b83906ec359..015444109984 100644
--- a/drivers/md/bcache/btree_update.h
+++ b/drivers/md/bcache/btree_update.h
@@ -31,7 +31,8 @@ struct btree_reserve {
};
void __bch_btree_calc_format(struct bkey_format_state *, struct btree *);
-bool bch_btree_node_format_fits(struct btree *, struct bkey_format *);
+bool bch_btree_node_format_fits(struct cache_set *c, struct btree *,
+ struct bkey_format *);
/* Btree node freeing/allocation: */
@@ -277,7 +278,7 @@ static inline bool bch_btree_node_insert_fits(struct cache_set *c,
static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
struct bkey_packed *k)
{
- if (bset_written(b, t->data)) {
+ if (bset_written(b, bset(b, t))) {
EBUG_ON(b->uncompacted_whiteout_u64s <
bkeyp_key_u64s(&b->format, k));
b->uncompacted_whiteout_u64s -=
@@ -288,7 +289,7 @@ static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
struct bkey_packed *k)
{
- if (bset_written(b, t->data)) {
+ if (bset_written(b, bset(b, t))) {
BUG_ON(!k->needs_whiteout);
b->uncompacted_whiteout_u64s +=
bkeyp_key_u64s(&b->format, k);
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 3f2d8df90904..e498e9e67000 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -1082,7 +1082,7 @@ static void extent_bset_insert(struct cache_set *c, struct btree_iter *iter,
struct btree_node_iter *node_iter = &iter->node_iters[0];
struct bset_tree *t = bset_tree_last(b);
struct bkey_packed *where =
- bch_btree_node_iter_bset_pos(node_iter, b, t->data);
+ bch_btree_node_iter_bset_pos(node_iter, b, bset(b, t));
struct bkey_packed *prev = bkey_prev(b, t, where);
struct bkey_packed *next_live_key = where;
unsigned clobber_u64s;
@@ -1090,7 +1090,7 @@ static void extent_bset_insert(struct cache_set *c, struct btree_iter *iter,
if (prev)
where = bkey_next(prev);
- while (next_live_key != bset_bkey_last(t->data) &&
+ while (next_live_key != bset_bkey_last(bset(b, t)) &&
bkey_deleted(next_live_key))
next_live_key = bkey_next(next_live_key);
@@ -1104,7 +1104,7 @@ static void extent_bset_insert(struct cache_set *c, struct btree_iter *iter,
bch_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
goto drop_deleted_keys;
- if (next_live_key != bset_bkey_last(t->data) &&
+ if (next_live_key != bset_bkey_last(bset(b, t)) &&
bch_extent_merge_inline(c, iter, bkey_to_packed(insert),
next_live_key, false))
goto drop_deleted_keys;
@@ -1376,7 +1376,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
* what k points to)
*/
bkey_reassemble(&split.k, k.s_c);
- split.k.k.needs_whiteout |= bset_written(b, t->data);
+ split.k.k.needs_whiteout |= bset_written(b, bset(b, t));
bch_cut_back(bkey_start_pos(&insert->k), &split.k.k);
BUG_ON(bkey_deleted(&split.k.k));
@@ -1458,7 +1458,7 @@ bch_delete_fixup_extent(struct extent_insert_state *s)
_k->type = KEY_TYPE_DISCARD;
reserve_whiteout(b, t, _k);
} else if (k.k->needs_whiteout ||
- bset_written(b, t->data)) {
+ bset_written(b, bset(b, t))) {
struct bkey_i discard = *insert;
switch (overlap) {
@@ -1625,7 +1625,7 @@ bch_insert_fixup_extent(struct btree_insert *trans,
}
if (k.k->size &&
- (k.k->needs_whiteout || bset_written(b, t->data)))
+ (k.k->needs_whiteout || bset_written(b, bset(b, t))))
insert->k->k.needs_whiteout = true;
if (overlap == BCH_EXTENT_OVERLAP_ALL &&
@@ -2367,19 +2367,21 @@ static bool extent_merge_do_overlapping(struct btree_iter *iter,
*/
do_fixup:
for_each_bset(b, t) {
+ struct bset *i = bset(b, t);
+
if (t == bset_tree_last(b))
break;
- if (!t->data->u64s)
+ if (!i->u64s)
continue;
/*
* if we don't find this bset in the iterator we already got to
* the end of that bset, so start searching from the end.
*/
- k = bch_btree_node_iter_bset_pos(node_iter, b, t->data);
+ k = bch_btree_node_iter_bset_pos(node_iter, b, i);
- if (k == bset_bkey_last(t->data))
+ if (k == bset_bkey_last(i))
k = bkey_prev_all(b, t, k);
if (back_merge) {
@@ -2403,7 +2405,7 @@ do_fixup:
} else {
/* Front merge - walk forwards */
for (;
- k != bset_bkey_last(t->data) &&
+ k != bset_bkey_last(i) &&
(uk = bkey_unpack_key(b, k),
bkey_cmp(uk.p, m->p) < 0);
k = bkey_next(k)) {
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index d5248e296126..04398c05373b 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -540,7 +540,7 @@ static size_t bch_cache_size(struct cache_set *c)
mutex_lock(&c->btree_cache_lock);
list_for_each_entry(b, &c->btree_cache, list)
- ret += 1 << (b->page_order + PAGE_SHIFT);
+ ret += btree_bytes(c);
mutex_unlock(&c->btree_cache_lock);
return ret;