author | Kent Overstreet <kent.overstreet@gmail.com> | 2016-11-30 21:47:15 -0900
---|---|---
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2017-01-18 21:41:18 -0900
commit | ad70466ab86e6919ee588f784b8d65d2fef76cfd (patch) |
tree | 9a1dd4dd2dba4df02404dd980484e6f623a44174 |
parent | e4dd371cf9cdf9560c2605882d3b7d38519f020a (diff) |
bcache: kill struct btree_keys
-rw-r--r-- | drivers/md/bcache/bkey.c | 18
-rw-r--r-- | drivers/md/bcache/bkey.h | 24
-rw-r--r-- | drivers/md/bcache/bkey_methods.h | 5
-rw-r--r-- | drivers/md/bcache/bset.c | 128
-rw-r--r-- | drivers/md/bcache/bset.h | 178
-rw-r--r-- | drivers/md/bcache/btree_cache.c | 22
-rw-r--r-- | drivers/md/bcache/btree_gc.c | 20
-rw-r--r-- | drivers/md/bcache/btree_io.c | 192
-rw-r--r-- | drivers/md/bcache/btree_io.h | 4
-rw-r--r-- | drivers/md/bcache/btree_iter.c | 77
-rw-r--r-- | drivers/md/bcache/btree_types.h | 97
-rw-r--r-- | drivers/md/bcache/btree_update.c | 138
-rw-r--r-- | drivers/md/bcache/btree_update.h | 10
-rw-r--r-- | drivers/md/bcache/debug.c | 24
-rw-r--r-- | drivers/md/bcache/extents.c | 78
-rw-r--r-- | drivers/md/bcache/extents.h | 4
-rw-r--r-- | drivers/md/bcache/sysfs.c | 6
17 files changed, 509 insertions, 516 deletions
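The patch is one mechanical refactoring: `struct btree_keys`, which `struct btree` used to embed as `b->keys`, is dissolved and its fields become direct members of `struct btree` (the receiving btree_types.h hunk is not shown in this truncated view). A condensed sketch of the shape of the change, with stub field types standing in for the real ones:

```c
/* Condensed sketch -- stub types, not the full kernel definitions. */
#define MAX_BSETS 3U

struct btree_nr_keys { unsigned short live_u64s, bset_u64s[MAX_BSETS],
		       packed_keys, unpacked_keys; };
struct bkey_format { unsigned char key_u64s; /* ... */ };
struct bset_tree { unsigned short size, extra, aux_data_offset; /* ... */ };

/*
 * Before: bset.h defined a separate container,
 *
 *	struct btree_keys { u8 nsets; ... struct bset_tree set[MAX_BSETS]; };
 *
 * which struct btree embedded as b->keys, so callers wrote b->keys.nsets,
 * &b->keys, and so on.  After the patch the fields live in struct btree
 * itself and every helper takes the node directly:
 */
struct btree {
	/* ...cache, lock and I/O state elided... */
	unsigned char		nsets;
	unsigned char		page_order;
	unsigned char		nr_key_bits;
	unsigned char		unpack_fn_len;
	struct btree_nr_keys	nr;
	struct bkey_format	format;
	void			*aux_data;
	struct bset_tree	set[MAX_BSETS];
};
```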
diff --git a/drivers/md/bcache/bkey.c b/drivers/md/bcache/bkey.c
index dfbfd767ae02..966620014c8f 100644
--- a/drivers/md/bcache/bkey.c
+++ b/drivers/md/bcache/bkey.c
@@ -384,7 +384,7 @@ bool bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
 /**
  * bkey_unpack -- unpack the key and the value
  */
-void bkey_unpack(const struct btree_keys *b, struct bkey_i *dst,
+void bkey_unpack(const struct btree *b, struct bkey_i *dst,
 		 const struct bkey_packed *src)
 {
 	dst->k = bkey_unpack_key(b, src);
@@ -446,7 +446,7 @@ static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v)
 #ifdef CONFIG_BCACHE_DEBUG
 
 static bool bkey_packed_successor(struct bkey_packed *out,
-				  const struct btree_keys *b,
+				  const struct btree *b,
 				  struct bkey_packed k)
 {
 	const struct bkey_format *f = &b->format;
@@ -495,7 +495,7 @@ static bool bkey_packed_successor(struct bkey_packed *out,
  */
 enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
 					   struct bpos in,
-					   const struct btree_keys *b)
+					   const struct btree *b)
 {
 	const struct bkey_format *f = &b->format;
 	struct pack_state state = pack_state_init(f, out);
@@ -690,7 +690,7 @@ const char *bch_bkey_format_validate(struct bkey_format *f)
  * Bits are indexed from 0 - return is [0, nr_key_bits)
  */
 __pure
-unsigned bkey_greatest_differing_bit(const struct btree_keys *b,
+unsigned bkey_greatest_differing_bit(const struct btree *b,
 				     const struct bkey_packed *l_k,
 				     const struct bkey_packed *r_k)
 {
@@ -734,7 +734,7 @@ unsigned bkey_greatest_differing_bit(const struct btree_keys *b,
  * Bits are indexed from 0 - return is [0, nr_key_bits)
  */
 __pure
-unsigned bkey_ffs(const struct btree_keys *b,
+unsigned bkey_ffs(const struct btree *b,
 		  const struct bkey_packed *k)
 {
 	const u64 *p = high_word(&b->format, k);
@@ -1118,7 +1118,7 @@ int bkey_cmp(const struct bkey *l, const struct bkey *r)
 __pure
 int __bkey_cmp_packed_format_checked(const struct bkey_packed *l,
 				     const struct bkey_packed *r,
-				     const struct btree_keys *b)
+				     const struct btree *b)
 {
 	const struct bkey_format *f = &b->format;
 	int ret;
@@ -1136,7 +1136,7 @@ int __bkey_cmp_packed_format_checked(const struct bkey_packed *l,
 }
 
 __pure __flatten
-int __bkey_cmp_left_packed_format_checked(const struct btree_keys *b,
+int __bkey_cmp_left_packed_format_checked(const struct btree *b,
 					  const struct bkey_packed *l,
 					  const struct bpos *r)
 {
@@ -1150,7 +1150,7 @@ int __bkey_cmp_left_packed_format_checked(const struct btree_keys *b,
 __pure __flatten
 int __bkey_cmp_packed(const struct bkey_packed *l,
 		      const struct bkey_packed *r,
-		      const struct btree_keys *b)
+		      const struct btree *b)
 {
 	int packed = bkey_lr_packed(l, r);
@@ -1175,7 +1175,7 @@ int __bkey_cmp_packed(const struct bkey_packed *l,
 }
 
 __pure __flatten
-int bkey_cmp_left_packed(const struct btree_keys *b,
+int bkey_cmp_left_packed(const struct btree *b,
 			 const struct bkey_packed *l, const struct bpos *r)
 {
 	const struct bkey *l_unpacked;
diff --git a/drivers/md/bcache/bkey.h b/drivers/md/bcache/bkey.h
index 57e5b8d3fb74..dbea52986a53 100644
--- a/drivers/md/bcache/bkey.h
+++ b/drivers/md/bcache/bkey.h
@@ -9,6 +9,8 @@ void bch_to_binary(char *, const u64 *, unsigned);
 int bch_bkey_to_text(char *, size_t, const struct bkey *);
 
+#define BKEY_PADDED(key) __BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX)
+
 /* bkey with split value, const */
 struct bkey_s_c {
 	const struct bkey	*k;
@@ -110,7 +112,7 @@ do {						\
 	((struct bkey *) (_src))->u64s);	\
 } while (0)
 
-struct btree_keys;
+struct btree;
 
 struct bkey_format_state {
 	u64 field_min[BKEY_NR_FIELDS];
@@ -124,29 +126,29 @@
 struct bkey_format bch_bkey_format_done(struct bkey_format_state *);
 const char *bch_bkey_format_validate(struct bkey_format *);
 
 __pure
-unsigned bkey_greatest_differing_bit(const struct btree_keys *,
+unsigned bkey_greatest_differing_bit(const struct btree *,
 				     const struct bkey_packed *,
 				     const struct bkey_packed *);
 __pure
-unsigned bkey_ffs(const struct btree_keys *, const struct bkey_packed *);
+unsigned bkey_ffs(const struct btree *, const struct bkey_packed *);
 
 __pure
 int __bkey_cmp_packed_format_checked(const struct bkey_packed *,
 				     const struct bkey_packed *,
-				     const struct btree_keys *);
+				     const struct btree *);
 
 __pure
-int __bkey_cmp_left_packed_format_checked(const struct btree_keys *,
+int __bkey_cmp_left_packed_format_checked(const struct btree *,
 					  const struct bkey_packed *,
 					  const struct bpos *);
 
 __pure
 int __bkey_cmp_packed(const struct bkey_packed *,
 		      const struct bkey_packed *,
-		      const struct btree_keys *);
+		      const struct btree *);
 
 __pure
-int bkey_cmp_left_packed(const struct btree_keys *,
+int bkey_cmp_left_packed(const struct btree *,
 			 const struct bkey_packed *,
 			 const struct bpos *);
 
@@ -155,7 +157,7 @@ int bkey_cmp_left_packed(const struct btree_keys *,
  * pass it by by val... as much as I hate c++, const ref would be nice here:
  */
 __pure __flatten
-static inline int bkey_cmp_left_packed_byval(const struct btree_keys *b,
+static inline int bkey_cmp_left_packed_byval(const struct btree *b,
 					     const struct bkey_packed *l,
 					     struct bpos r)
 {
@@ -336,15 +338,15 @@ enum bkey_pack_pos_ret {
 };
 
 enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
-					   const struct btree_keys *);
+					   const struct btree *);
 
 static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
-				 const struct btree_keys *b)
+				 const struct btree *b)
 {
 	return bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
 }
 
-void bkey_unpack(const struct btree_keys *, struct bkey_i *,
+void bkey_unpack(const struct btree *, struct bkey_i *,
 		 const struct bkey_packed *);
 bool bkey_pack(struct bkey_packed *, const struct bkey_i *,
 	       const struct bkey_format *);
diff --git a/drivers/md/bcache/bkey_methods.h b/drivers/md/bcache/bkey_methods.h
index 19746aec3fb3..c4e80efb869a 100644
--- a/drivers/md/bcache/bkey_methods.h
+++ b/drivers/md/bcache/bkey_methods.h
@@ -26,7 +26,6 @@ static inline bool btree_type_has_ptrs(enum bkey_type type)
 }
 
 struct cache_set;
-struct btree_keys;
 struct btree;
 struct bkey;
 
@@ -41,10 +40,10 @@ enum merge_result {
 	BCH_MERGE_MERGE,
 };
 
-typedef bool (*key_filter_fn)(struct cache_set *, struct btree_keys *,
+typedef bool (*key_filter_fn)(struct cache_set *, struct btree *,
 			      struct bkey_s);
 typedef enum merge_result (*key_merge_fn)(struct cache_set *,
-					  struct btree_keys *,
+					  struct btree *,
 					  struct bkey_i *,
 					  struct bkey_i *);
 
 struct bkey_ops {
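From here on the hunks are fallout: every helper that took a `struct btree_keys *` now takes `struct btree *`, and call sites stop reaching through the embedded member. A toy user-space model of the calling-convention change (stub types; `malloc`/`free` stand in for the kernel's `__vmalloc`/`vfree`):

```c
#include <stdlib.h>

struct btree { unsigned char page_order; void *aux_data; /* ... */ };

/* was: void bch_btree_keys_free(struct btree_keys *b), called as
 * bch_btree_keys_free(&b->keys) -- now the node itself is the argument: */
static void bch_btree_keys_free(struct btree *b)
{
	free(b->aux_data);	/* kernel code uses vfree() */
	b->aux_data = NULL;
}

int main(void)
{
	struct btree b = { .page_order = 2, .aux_data = malloc(64) };

	bch_btree_keys_free(&b);	/* was: bch_btree_keys_free(&b.keys) */
	return b.aux_data != NULL;
}
```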
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index d55c9fe5d1a3..6936efbc81fc 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -20,7 +20,7 @@
 #include "alloc_types.h"
 #include <trace/events/bcache.h>
 
-struct bset_tree *bch_bkey_to_bset(struct btree_keys *b, struct bkey_packed *k)
+struct bset_tree *bch_bkey_to_bset(struct btree *b, struct bkey_packed *k)
 {
 	struct bset_tree *t;
 
@@ -48,7 +48,7 @@ struct bset_tree *bch_bkey_to_bset(struct btree_keys *b, struct bkey_packed *k)
  * by the time we actually do the insert will all be deleted.
  */
-void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+void bch_dump_bset(struct btree *b, struct bset *i, unsigned set)
 {
 	struct bkey_packed *_k, *_n;
 	struct bkey k, n;
@@ -89,7 +89,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
 	}
 }
 
-void bch_dump_btree_node(struct btree_keys *b)
+void bch_dump_btree_node(struct btree *b)
 {
 	struct bset_tree *t;
 
@@ -99,7 +99,7 @@ void bch_dump_btree_node(struct btree_keys *b)
 	console_unlock();
 }
 
-void bch_dump_btree_node_iter(struct btree_keys *b,
+void bch_dump_btree_node_iter(struct btree *b,
 			      struct btree_node_iter *iter)
 {
 	struct btree_node_iter_set *set;
@@ -120,7 +120,7 @@ void bch_dump_btree_node_iter(struct btree_keys *b,
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-static bool keys_out_of_order(struct btree_keys *b,
+static bool keys_out_of_order(struct btree *b,
 			      const struct bkey_packed *prev,
 			      const struct bkey_packed *next,
 			      bool is_extents)
@@ -134,7 +134,7 @@ static bool keys_out_of_order(struct btree_keys *b,
 		 !bkey_cmp_packed(b, prev, next));
 }
 
-void __bch_verify_btree_nr_keys(struct btree_keys *b)
+void __bch_verify_btree_nr_keys(struct btree *b)
 {
 	struct bset_tree *t;
 	struct bkey_packed *k;
@@ -151,7 +151,7 @@ void __bch_verify_btree_nr_keys(struct btree_keys *b)
 }
 
 static void bch_btree_node_iter_next_check(struct btree_node_iter *iter,
-					   struct btree_keys *b,
+					   struct btree *b,
 					   struct bkey_packed *k)
 {
 	const struct bkey_packed *n = bch_btree_node_iter_peek_all(iter, b);
@@ -172,7 +172,7 @@ static void bch_btree_node_iter_next_check(struct btree_node_iter *iter,
 }
 
 void bch_btree_node_iter_verify(struct btree_node_iter *iter,
-				struct btree_keys *b)
+				struct btree *b)
 {
 	struct btree_node_iter_set *set;
 	struct bset_tree *t;
@@ -204,7 +204,7 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter,
 					   k, first) > 0);
 }
 
-void bch_verify_key_order(struct btree_keys *b,
+void bch_verify_key_order(struct btree *b,
 			  struct btree_node_iter *iter,
 			  struct bkey_packed *where)
 {
@@ -268,7 +268,7 @@ void bch_verify_key_order(struct btree_keys *b,
 #else
 
 static void bch_btree_node_iter_next_check(struct btree_node_iter *iter,
-					   struct btree_keys *b,
+					   struct btree *b,
 					   struct bkey_packed *k) {}
 
 #endif
@@ -330,22 +330,22 @@ struct ro_aux_tree {
 #define BSET_CACHELINE 128
 
 /* Space required for the btree node keys */
-static inline size_t btree_keys_bytes(struct btree_keys *b)
+static inline size_t btree_keys_bytes(struct btree *b)
 {
 	return PAGE_SIZE << b->page_order;
 }
 
-static inline size_t btree_keys_cachelines(struct btree_keys *b)
+static inline size_t btree_keys_cachelines(struct btree *b)
 {
 	return btree_keys_bytes(b) / BSET_CACHELINE;
 }
 
-static inline size_t btree_aux_data_bytes(struct btree_keys *b)
+static inline size_t btree_aux_data_bytes(struct btree *b)
 {
 	return btree_keys_cachelines(b) * 8;
 }
 
-static inline size_t btree_aux_data_u64s(struct btree_keys *b)
+static inline size_t btree_aux_data_u64s(struct btree *b)
 {
 	return btree_aux_data_bytes(b) / sizeof(u64);
 }
@@ -369,7 +369,7 @@ static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
 	}
 }
 
-static unsigned bset_aux_tree_buf_start(const struct btree_keys *b,
+static unsigned bset_aux_tree_buf_start(const struct btree *b,
 					const struct bset_tree *t)
 {
 	return t == b->set
@@ -377,13 +377,13 @@ static unsigned bset_aux_tree_buf_start(const struct btree_keys *b,
 		: bset_aux_tree_buf_end(t - 1);
 }
 
-static void *__aux_tree_base(const struct btree_keys *b,
+static void *__aux_tree_base(const struct btree *b,
 			     const struct bset_tree *t)
 {
 	return b->aux_data + t->aux_data_offset * 8;
 }
 
-static struct ro_aux_tree *ro_aux_tree_base(const struct btree_keys *b,
+static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
 					    const struct bset_tree *t)
 {
 	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
@@ -391,7 +391,7 @@ static struct ro_aux_tree *ro_aux_tree_base(const struct btree_keys *b,
 	return __aux_tree_base(b, t);
 }
 
-static u8 *ro_aux_tree_prev(const struct btree_keys *b,
+static u8 *ro_aux_tree_prev(const struct btree *b,
 			    const struct bset_tree *t)
 {
 	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
@@ -405,14 +405,14 @@ static struct bkey_float *bkey_float_get(struct ro_aux_tree *b,
 	return (void *) b + bkey_float_byte_offset(idx);
 }
 
-static struct bkey_float *bkey_float(const struct btree_keys *b,
+static struct bkey_float *bkey_float(const struct btree *b,
 				     const struct bset_tree *t,
 				     unsigned idx)
 {
 	return bkey_float_get(ro_aux_tree_base(b, t), idx);
 }
 
-static void bset_aux_tree_verify(struct btree_keys *b)
+static void bset_aux_tree_verify(struct btree *b)
 {
 #ifdef CONFIG_BCACHE_DEBUG
 	struct bset_tree *t;
@@ -433,13 +433,13 @@ static void bset_aux_tree_verify(struct btree_keys *b)
 
 /* Memory allocation */
 
-void bch_btree_keys_free(struct btree_keys *b)
+void bch_btree_keys_free(struct btree *b)
 {
 	vfree(b->aux_data);
 	b->aux_data = NULL;
 }
 
-int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
 {
 	b->page_order = page_order;
 	b->aux_data = __vmalloc(btree_aux_data_bytes(b), gfp,
@@ -450,7 +450,7 @@ int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
 	return 0;
 }
 
-void bch_btree_keys_init(struct btree_keys *b, bool *expensive_debug_checks)
+void bch_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
 {
 	unsigned i;
 
@@ -667,14 +667,14 @@ static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
 	return m;
 }
 
-static struct bkey_packed *tree_to_bkey(const struct btree_keys *b,
+static struct bkey_packed *tree_to_bkey(const struct btree *b,
 					struct bset_tree *t, unsigned j)
 {
 	return cacheline_to_bkey(t, to_inorder(j, t),
 				 bkey_float(b, t, j)->key_offset);
 }
 
-static struct bkey_packed *tree_to_prev_bkey(struct btree_keys *b,
+static struct bkey_packed *tree_to_prev_bkey(struct btree *b,
 					     struct bset_tree *t, unsigned j)
 {
 	unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
@@ -682,7 +682,7 @@ static struct bkey_packed *tree_to_prev_bkey(struct btree_keys *b,
 	return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
 }
 
-static u8 *rw_aux_tree(const struct btree_keys *b,
+static u8 *rw_aux_tree(const struct btree *b,
 		       const struct bset_tree *t)
 {
 	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
@@ -694,7 +694,7 @@ static u8 *rw_aux_tree(const struct btree_keys *b,
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
-static struct bkey_packed *table_to_bkey(const struct btree_keys *b,
+static struct bkey_packed *table_to_bkey(const struct btree *b,
 					 struct bset_tree *t,
 					 unsigned cacheline)
 {
@@ -740,7 +740,7 @@ static inline unsigned bkey_mantissa(const struct bkey_packed *k,
 	return idx < BFLOAT_32BIT_NR ? (u32) v : (u16) v;
 }
 
-static void make_bfloat(struct btree_keys *b,
+static void make_bfloat(struct btree *b,
 			struct bset_tree *t, unsigned j)
 {
 	struct bkey_float *f = bkey_float(b, t, j);
@@ -847,14 +847,14 @@ static void make_bfloat(struct btree_keys *b,
 }
 
 /* bytes remaining - only valid for last bset: */
-static unsigned __bset_tree_capacity(struct btree_keys *b, struct bset_tree *t)
+static unsigned __bset_tree_capacity(struct btree *b, struct bset_tree *t)
 {
 	bset_aux_tree_verify(b);
 
 	return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
 }
 
-static unsigned bset_ro_tree_capacity(struct btree_keys *b, struct bset_tree *t)
+static unsigned bset_ro_tree_capacity(struct btree *b, struct bset_tree *t)
 {
 	unsigned bytes = __bset_tree_capacity(b, t);
 
@@ -866,12 +866,12 @@ static unsigned bset_ro_tree_capacity(struct btree_keys *b, struct bset_tree *t)
 	return BFLOAT_32BIT_NR + bytes / 5;
 }
 
-static unsigned bset_rw_tree_capacity(struct btree_keys *b, struct bset_tree *t)
+static unsigned bset_rw_tree_capacity(struct btree *b, struct bset_tree *t)
 {
 	return __bset_tree_capacity(b, t) / sizeof(u8);
 }
 
-static void bch_bset_lookup_table_add_entries(struct btree_keys *b,
+static void bch_bset_lookup_table_add_entries(struct btree *b,
 					      struct bset_tree *t)
 {
 	struct bkey_packed *k;
@@ -892,7 +892,7 @@ static void bch_bset_lookup_table_add_entries(struct btree_keys *b,
 	}
 }
 
-static void __build_rw_aux_tree(struct btree_keys *b, struct bset_tree *t)
+static void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
 {
 	t->size = 1;
 	t->extra = BSET_RW_AUX_TREE_VAL;
@@ -901,7 +901,7 @@ static void __build_rw_aux_tree(struct btree_keys *b, struct bset_tree *t)
 	bch_bset_lookup_table_add_entries(b, t);
 }
 
-static void __build_ro_aux_tree(struct btree_keys *b, struct bset_tree *t)
+static void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 {
 	struct bkey_packed *prev = NULL, *k = t->data->start;
 	unsigned j, cacheline = 1;
@@ -949,7 +949,7 @@ retry:
 		make_bfloat(b, t, j);
 }
 
-static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
 {
 	struct bset_tree *i;
 
@@ -965,7 +965,7 @@ static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
 	bset_aux_tree_verify(b);
 }
 
-void bch_bset_build_aux_tree(struct btree_keys *b, struct bset_tree *t,
+void bch_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
 			     bool writeable)
 {
 	if (writeable
@@ -986,7 +986,7 @@ void bch_bset_build_aux_tree(struct btree_keys *b, struct bset_tree *t,
 	bset_aux_tree_verify(b);
 }
 
-void bch_bset_init_first(struct btree_keys *b, struct bset *i)
+void bch_bset_init_first(struct btree *b, struct bset *i)
 {
 	struct bset_tree *t;
 
@@ -999,7 +999,7 @@ void bch_bset_init_first(struct btree_keys *b, struct bset *i)
 	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
 }
 
-void bch_bset_init_next(struct btree_keys *b, struct bset *i)
+void bch_bset_init_next(struct btree *b, struct bset *i)
 {
 	struct bset_tree *t;
 
@@ -1012,7 +1012,7 @@ void bch_bset_init_next(struct btree_keys *b, struct bset *i)
 	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
 }
 
-static struct bkey_packed *__bkey_prev(struct btree_keys *b, struct bset_tree *t,
+static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
 				       struct bkey_packed *k)
 {
 	struct bkey_packed *p;
@@ -1048,7 +1048,7 @@ static struct bkey_packed *__bkey_prev(struct btree_keys *b, struct bset_tree *t
 	return p;
 }
 
-struct bkey_packed *bkey_prev_all(struct btree_keys *b, struct bset_tree *t,
+struct bkey_packed *bkey_prev_all(struct btree *b, struct bset_tree *t,
 				  struct bkey_packed *k)
 {
 	struct bkey_packed *p;
@@ -1063,7 +1063,7 @@ struct bkey_packed *bkey_prev_all(struct btree_keys *b, struct bset_tree *t,
 	return p;
 }
 
-struct bkey_packed *bkey_prev(struct btree_keys *b, struct bset_tree *t,
+struct bkey_packed *bkey_prev(struct btree *b, struct bset_tree *t,
 			      struct bkey_packed *k)
 {
 	while (1) {
@@ -1091,7 +1091,7 @@ struct bkey_packed *bkey_prev(struct btree_keys *b, struct bset_tree *t,
 * modified, fix any auxiliary search tree by remaking all the nodes in the
 * auxiliary search tree that @k corresponds to
 */
-void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bset_tree *t,
+void bch_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
 				  struct bkey_packed *k)
 {
 	unsigned inorder, j = 1;
@@ -1139,7 +1139,7 @@ void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bset_tree *t,
 }
 EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
 
-static void bch_bset_fix_lookup_table(struct btree_keys *b,
+static void bch_bset_fix_lookup_table(struct btree *b,
 				      struct bset_tree *t,
 				      struct bkey_packed *where,
 				      unsigned clobber_u64s,
@@ -1206,7 +1206,7 @@ verify:
 	bset_aux_tree_verify(b);
 }
 
-static void bch_bset_verify_lookup_table(struct btree_keys *b,
+static void bch_bset_verify_lookup_table(struct btree *b,
 					 struct bset_tree *t)
 {
 	struct bkey_packed *k;
@@ -1238,7 +1238,7 @@ static void bch_bset_verify_lookup_table(struct btree_keys *b,
 	BUG();
 }
 
-void bch_bset_insert(struct btree_keys *b,
+void bch_bset_insert(struct btree *b,
 		     struct btree_node_iter *iter,
 		     struct bkey_packed *where,
 		     struct bkey_i *insert,
@@ -1275,7 +1275,7 @@ void bch_bset_insert(struct btree_keys *b,
 	bch_verify_btree_nr_keys(b);
 }
 
-void bch_bset_delete(struct btree_keys *b,
+void bch_bset_delete(struct btree *b,
 		     struct bkey_packed *where,
 		     unsigned clobber_u64s)
 {
@@ -1294,7 +1294,7 @@ void bch_bset_delete(struct btree_keys *b,
 
 /* Lookup */
 
 __flatten
-static struct bkey_packed *bset_search_write_set(const struct btree_keys *b,
+static struct bkey_packed *bset_search_write_set(const struct btree *b,
 				struct bset_tree *t,
 				struct bpos search,
 				const struct bkey_packed *packed_search)
@@ -1315,7 +1315,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree_keys *b,
 }
 
 noinline
-static int bset_search_tree_slowpath(const struct btree_keys *b,
+static int bset_search_tree_slowpath(const struct btree *b,
 				     struct bset_tree *t, struct bpos *search,
 				     const struct bkey_packed *packed_search,
 				     unsigned n)
@@ -1325,7 +1325,7 @@ static int bset_search_tree_slowpath(const struct btree_keys *b,
 }
 
 __flatten
-static struct bkey_packed *bset_search_tree(const struct btree_keys *b,
+static struct bkey_packed *bset_search_tree(const struct btree *b,
 				struct bset_tree *t,
 				struct bpos search,
 				const struct bkey_packed *packed_search)
@@ -1393,7 +1393,7 @@ static struct bkey_packed *bset_search_tree(const struct btree_keys *b,
 * Returns the first key greater than or equal to @search
 */
 __always_inline __flatten
-static struct bkey_packed *bch_bset_search(struct btree_keys *b,
+static struct bkey_packed *bch_bset_search(struct btree *b,
 				struct bset_tree *t,
 				struct bpos search,
 				struct bkey_packed *packed_search,
@@ -1469,7 +1469,7 @@ static struct bkey_packed *bch_bset_search(struct btree_keys *b,
 /* Btree node iterator */
 
 void bch_btree_node_iter_push(struct btree_node_iter *iter,
-			      struct btree_keys *b,
+			      struct btree *b,
 			      const struct bkey_packed *k,
 			      const struct bkey_packed *end)
 {
@@ -1493,7 +1493,7 @@ void bch_btree_node_iter_push(struct btree_node_iter *iter,
 
 noinline __flatten __attribute__((cold))
 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
-			      struct btree_keys *b, struct bpos search,
+			      struct btree *b, struct bpos search,
 			      bool strictly_greater, bool is_extents)
 {
 	struct bset_tree *t;
@@ -1552,7 +1552,7 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
 * past any extents that compare equal to the position we searched for.
 */
 void bch_btree_node_iter_init(struct btree_node_iter *iter,
-			      struct btree_keys *b, struct bpos search,
+			      struct btree *b, struct bpos search,
 			      bool strictly_greater, bool is_extents)
 {
 	struct bset_tree *t;
@@ -1588,7 +1588,7 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter,
 }
 
 void bch_btree_node_iter_init_from_start(struct btree_node_iter *iter,
-					 struct btree_keys *b,
+					 struct btree *b,
 					 bool is_extents)
 {
 	struct bset_tree *t;
@@ -1603,7 +1603,7 @@ void bch_btree_node_iter_init_from_start(struct btree_node_iter *iter,
 }
 
 struct bkey_packed *bch_btree_node_iter_bset_pos(struct btree_node_iter *iter,
-						 struct btree_keys *b,
+						 struct btree *b,
 						 struct bset *i)
 {
 	unsigned end = __btree_node_key_to_offset(b, bset_bkey_last(i));
@@ -1619,7 +1619,7 @@ struct bkey_packed *bch_btree_node_iter_bset_pos(struct btree_node_iter *iter,
 }
 
 static inline void btree_node_iter_sift(struct btree_node_iter *iter,
-					struct btree_keys *b,
+					struct btree *b,
 					unsigned start)
 {
 	unsigned i;
@@ -1634,7 +1634,7 @@ static inline void btree_node_iter_sift(struct btree_node_iter *iter,
 }
 
 static inline void btree_node_iter_sort_two(struct btree_node_iter *iter,
-					    struct btree_keys *b,
+					    struct btree *b,
 					    unsigned first)
 {
 	if (btree_node_iter_cmp(iter, b,
@@ -1644,7 +1644,7 @@ static inline void btree_node_iter_sort_two(struct btree_node_iter *iter,
 }
 
 void bch_btree_node_iter_sort(struct btree_node_iter *iter,
-			      struct btree_keys *b)
+			      struct btree *b)
 {
 	EBUG_ON(iter->used > 3);
 
@@ -1667,7 +1667,7 @@ EXPORT_SYMBOL(bch_btree_node_iter_sort);
 * momentarily have out of order extents.
 */
 void bch_btree_node_iter_advance(struct btree_node_iter *iter,
-				 struct btree_keys *b)
+				 struct btree *b)
 {
 	struct bkey_packed *k = bch_btree_node_iter_peek_all(iter, b);
 
@@ -1689,7 +1689,7 @@ void bch_btree_node_iter_advance(struct btree_node_iter *iter,
 * Expensive:
 */
 struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *iter,
-						 struct btree_keys *b)
+						 struct btree *b)
 {
 	struct bkey_packed *k, *prev = NULL;
 	struct btree_node_iter_set *set;
@@ -1738,7 +1738,7 @@ out:
 }
 
 struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *iter,
-					     struct btree_keys *b)
+					     struct btree *b)
 {
 	struct bkey_packed *k;
 
@@ -1750,7 +1750,7 @@ struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *iter,
 }
 
 struct bkey_s_c bch_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
-						struct btree_keys *b,
+						struct btree *b,
 						struct bkey *u)
 {
 	struct bkey_packed *k = bch_btree_node_iter_peek(iter, b);
@@ -1761,7 +1761,7 @@ EXPORT_SYMBOL(bch_btree_node_iter_peek_unpack);
 
 /* Mergesort */
 
-void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
+void bch_btree_keys_stats(struct btree *b, struct bset_stats *stats)
 {
 	struct bset_tree *t;
 
@@ -1791,7 +1791,7 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
 	}
 }
 
-int bch_bkey_print_bfloat(struct btree_keys *b, struct bkey_packed *k,
+int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
 			  char *buf, size_t size)
 {
 	struct bset_tree *t = bch_bkey_to_bset(b, k);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 7517aeb08cf9..51043c969c9a 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -7,6 +7,7 @@
 
 #include "bkey.h"
 #include "bkey_methods.h"
+#include "btree_types.h"
 #include "util.h" /* for time_stats */
 
 /*
@@ -147,31 +148,6 @@
 struct btree_node_iter;
 struct btree_node_iter_set;
 
-#define MAX_BSETS	3U
-
-struct bset_tree {
-	/*
-	 * We construct a binary tree in an array as if the array
-	 * started at 1, so that things line up on the same cachelines
-	 * better: see comments in bset.c at cacheline_to_bkey() for
-	 * details
-	 */
-
-	/* size of the binary tree and prev array */
-	u16			size;
-
-	/* function of size - precalculated for to_inorder() */
-	u16			extra;
-
-	u16			aux_data_offset;
-
-	/* copy of the last key in the set */
-	struct bkey_packed	end;
-
-	/* The actual btree node, with pointers to each sorted set */
-	struct bset		*data;
-};
-
 enum bset_aux_tree_type {
 	BSET_NO_AUX_TREE,
 	BSET_RO_AUX_TREE,
@@ -198,48 +174,10 @@ static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree
 	}
 }
 
-struct btree_nr_keys {
-
-	/*
-	 * Amount of live metadata (i.e. size of node after a compaction) in
-	 * units of u64s
-	 */
-	u16			live_u64s;
-	u16			bset_u64s[MAX_BSETS];
-
-	/* live keys only: */
-	u16			packed_keys;
-	u16			unpacked_keys;
-};
-
 typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);
 
-struct btree_keys {
-	u8			nsets;
-	u8			page_order;
-	u8			nr_key_bits;
-	u8			unpack_fn_len;
-
-	struct btree_nr_keys	nr;
-
-	struct bkey_format	format;
-
-	void			*aux_data;
-
-	/*
-	 * Sets of sorted keys - the real btree node - plus a binary search tree
-	 *
-	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
-	 * to the memory we have allocated for this btree node. Additionally,
-	 * set[0]->data points to the entire btree node as it exists on disk.
-	 */
-	struct bset_tree	set[MAX_BSETS];
-#ifdef CONFIG_BCACHE_DEBUG
-	bool			*expensive_debug_checks;
-#endif
-};
-
 static inline struct bkey
-bkey_unpack_key_format_checked(const struct btree_keys *b,
+bkey_unpack_key_format_checked(const struct btree *b,
 			       const struct bkey_packed *src)
 {
 	struct bkey dst;
@@ -264,7 +202,7 @@ bkey_unpack_key_format_checked(const struct btree_keys *b,
 /**
 * bkey_unpack_key -- unpack just the key, not the value
 */
-static inline struct bkey bkey_unpack_key(const struct btree_keys *b,
+static inline struct bkey bkey_unpack_key(const struct btree *b,
 					  const struct bkey_packed *src)
 {
 	return likely(bkey_packed(src))
@@ -274,7 +212,7 @@ static inline struct bkey bkey_unpack_key(const struct btree_keys *b,
 
 /* Disassembled bkeys */
 
-static inline struct bkey_s_c bkey_disassemble(struct btree_keys *b,
+static inline struct bkey_s_c bkey_disassemble(struct btree *b,
 					       const struct bkey_packed *k,
 					       struct bkey *u)
 {
@@ -284,7 +222,7 @@ static inline struct bkey_s_c bkey_disassemble(struct btree_keys *b,
 }
 
 /* non const version: */
-static inline struct bkey_s __bkey_disassemble(struct btree_keys *b,
+static inline struct bkey_s __bkey_disassemble(struct btree *b,
 					       struct bkey_packed *k,
 					       struct bkey *u)
 {
@@ -298,7 +236,7 @@ static inline struct bkey_s __bkey_disassemble(struct btree_keys *b,
 
 extern bool bch_expensive_debug_checks;
 
-static inline bool btree_keys_expensive_checks(struct btree_keys *b)
+static inline bool btree_keys_expensive_checks(struct btree *b)
 {
 #ifdef CONFIG_BCACHE_DEBUG
 	return bch_expensive_debug_checks || *b->expensive_debug_checks;
@@ -307,12 +245,6 @@ static inline bool btree_keys_expensive_checks(struct btree_keys *b)
 #endif
 }
 
-static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
-{
-	EBUG_ON(!b->nsets);
-	return b->set + b->nsets - 1;
-}
-
 static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
 {
 	return bset_aux_tree_type(t) == BSET_RO_AUX_TREE;
@@ -323,7 +255,7 @@ static inline bool bset_has_rw_aux_tree(struct bset_tree *t)
 	return bset_aux_tree_type(t) == BSET_RW_AUX_TREE;
 }
 
-static inline void bch_bset_set_no_aux_tree(struct btree_keys *b,
+static inline void bch_bset_set_no_aux_tree(struct btree *b,
 					    struct bset_tree *t)
 {
 	BUG_ON(t < b->set);
@@ -335,7 +267,7 @@ static inline void bch_bset_set_no_aux_tree(struct btree_keys *b,
 	}
 }
 
-static inline void btree_node_set_format(struct btree_keys *b,
+static inline void btree_node_set_format(struct btree *b,
 					 struct bkey_format f)
 {
 	int len;
@@ -360,7 +292,7 @@ static inline void btree_node_set_format(struct btree_keys *b,
 #define set_blocks(_i, _block_bytes)				\
 	__set_blocks((_i), (_i)->u64s, (_block_bytes))
 
-static inline struct bset *bset_next_set(struct btree_keys *b,
+static inline struct bset *bset_next_set(struct btree *b,
 					 unsigned block_bytes)
 {
 	struct bset *i = bset_tree_last(b)->data;
@@ -370,24 +302,24 @@ static inline struct bset *bset_next_set(struct btree_keys *b,
 	return ((void *) i) + round_up(set_bytes(i), block_bytes);
 }
 
-void bch_btree_keys_free(struct btree_keys *);
-int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
-void bch_btree_keys_init(struct btree_keys *, bool *);
+void bch_btree_keys_free(struct btree *);
+int bch_btree_keys_alloc(struct btree *, unsigned, gfp_t);
+void bch_btree_keys_init(struct btree *, bool *);
 
-void bch_bset_init_first(struct btree_keys *, struct bset *);
-void bch_bset_init_next(struct btree_keys *, struct bset *);
-void bch_bset_build_aux_tree(struct btree_keys *, struct bset_tree *, bool);
-void bch_bset_fix_invalidated_key(struct btree_keys *, struct bset_tree *,
+void bch_bset_init_first(struct btree *, struct bset *);
+void bch_bset_init_next(struct btree *, struct bset *);
+void bch_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
+void bch_bset_fix_invalidated_key(struct btree *, struct bset_tree *,
 				  struct bkey_packed *);
 
-void bch_bset_insert(struct btree_keys *, struct btree_node_iter *,
+void bch_bset_insert(struct btree *, struct btree_node_iter *,
 		     struct bkey_packed *, struct bkey_i *, unsigned);
-void bch_bset_delete(struct btree_keys *, struct bkey_packed *, unsigned);
+void bch_bset_delete(struct btree *, struct bkey_packed *, unsigned);
 
 /* Bkey utility code */
 
 /* packed or unpacked */
-static inline int bkey_cmp_p_or_unp(const struct btree_keys *b,
+static inline int bkey_cmp_p_or_unp(const struct btree *b,
 				    const struct bkey_packed *l,
 				    const struct bkey_packed *r_packed,
 				    struct bpos *r)
@@ -413,7 +345,7 @@ static inline bool btree_iter_pos_cmp(struct bpos pos, const struct bkey *k,
 		(cmp == 0 && !strictly_greater && !bkey_deleted(k));
 }
 
-static inline bool btree_iter_pos_cmp_packed(const struct btree_keys *b,
+static inline bool btree_iter_pos_cmp_packed(const struct btree *b,
 					     struct bpos *pos,
 					     const struct bkey_packed *k,
 					     bool strictly_greater)
@@ -424,7 +356,7 @@ static inline bool btree_iter_pos_cmp_packed(const struct btree_keys *b,
 		(cmp == 0 && !strictly_greater && !bkey_deleted(k));
 }
 
-static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree_keys *b,
+static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree *b,
 					struct bpos pos,
 					const struct bkey_packed *pos_packed,
 					const struct bkey_packed *k,
@@ -436,8 +368,6 @@ static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree_keys *b,
 		(cmp == 0 && !strictly_greater && !bkey_deleted(k));
 }
 
-#define BKEY_PADDED(key) __BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX)
-
 #define __bkey_idx(_set, _offset)				\
 	((_set)->_data + (_offset))
 
@@ -455,10 +385,10 @@ static inline struct bkey_packed *bset_bkey_idx(struct bset *i, unsigned idx)
 	return bkey_idx(i, idx);
 }
 
-struct bset_tree *bch_bkey_to_bset(struct btree_keys *, struct bkey_packed *);
-struct bkey_packed *bkey_prev_all(struct btree_keys *, struct bset_tree *,
+struct bset_tree *bch_bkey_to_bset(struct btree *, struct bkey_packed *);
+struct bkey_packed *bkey_prev_all(struct btree *, struct bset_tree *,
 				  struct bkey_packed *);
-struct bkey_packed *bkey_prev(struct btree_keys *, struct bset_tree *,
+struct bkey_packed *bkey_prev(struct btree *, struct bset_tree *,
 			      struct bkey_packed *);
 
 enum bch_extent_overlap {
@@ -497,19 +427,19 @@ static inline void __bch_btree_node_iter_init(struct btree_node_iter *iter,
 	iter->is_extents = is_extents;
 }
 
-void bch_btree_node_iter_push(struct btree_node_iter *, struct btree_keys *,
+void bch_btree_node_iter_push(struct btree_node_iter *, struct btree *,
 			      const struct bkey_packed *,
 			      const struct bkey_packed *);
-void bch_btree_node_iter_init(struct btree_node_iter *, struct btree_keys *,
+void bch_btree_node_iter_init(struct btree_node_iter *, struct btree *,
 			      struct bpos, bool, bool);
 void bch_btree_node_iter_init_from_start(struct btree_node_iter *,
-					 struct btree_keys *, bool);
+					 struct btree *, bool);
 struct bkey_packed *bch_btree_node_iter_bset_pos(struct btree_node_iter *,
-						 struct btree_keys *,
+						 struct btree *,
 						 struct bset *);
 
-void bch_btree_node_iter_sort(struct btree_node_iter *, struct btree_keys *);
-void bch_btree_node_iter_advance(struct btree_node_iter *, struct btree_keys *);
+void bch_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
+void bch_btree_node_iter_advance(struct btree_node_iter *, struct btree *);
 
 #define btree_node_iter_for_each(_iter, _set)			\
 	for (_set = (_iter)->data;				\
@@ -522,7 +452,7 @@ static inline bool bch_btree_node_iter_end(struct btree_node_iter *iter)
 }
 
 static inline u16
-__btree_node_key_to_offset(struct btree_keys *b, const struct bkey_packed *k)
+__btree_node_key_to_offset(struct btree *b, const struct bkey_packed *k)
 {
 	size_t ret = (u64 *) k - (u64 *) b->set->data;
 
@@ -531,13 +461,13 @@ __btree_node_key_to_offset(struct btree_keys *b, const struct bkey_packed *k)
 }
 
 static inline struct bkey_packed *
-__btree_node_offset_to_key(struct btree_keys *b, u16 k)
+__btree_node_offset_to_key(struct btree *b, u16 k)
 {
 	return (void *) ((u64 *) b->set->data + k);
 }
 
 static inline int __btree_node_iter_cmp(bool is_extents,
-					struct btree_keys *b,
+					struct btree *b,
 					struct bkey_packed *l,
 					struct bkey_packed *r)
 {
@@ -555,7 +485,7 @@ static inline int __btree_node_iter_cmp(bool is_extents,
 }
 
 static inline int btree_node_iter_cmp(struct btree_node_iter *iter,
-				      struct btree_keys *b,
+				      struct btree *b,
 				      struct btree_node_iter_set l,
 				      struct btree_node_iter_set r)
 {
@@ -565,7 +495,7 @@ static inline int btree_node_iter_cmp(struct btree_node_iter *iter,
 }
 
 static inline void __bch_btree_node_iter_push(struct btree_node_iter *iter,
-			      struct btree_keys *b,
+			      struct btree *b,
 			      const struct bkey_packed *k,
 			      const struct bkey_packed *end)
 {
@@ -578,14 +508,14 @@ static inline void __bch_btree_node_iter_push(struct btree_node_iter *iter,
 
 static inline struct bkey_packed *
 __bch_btree_node_iter_peek_all(struct btree_node_iter *iter,
-			       struct btree_keys *b)
+			       struct btree *b)
 {
 	return __btree_node_offset_to_key(b, iter->data->k);
 }
 
 static inline struct bkey_packed *
 bch_btree_node_iter_peek_all(struct btree_node_iter *iter,
-			     struct btree_keys *b)
+			     struct btree *b)
 {
 	return bch_btree_node_iter_end(iter)
 		? NULL
@@ -593,7 +523,7 @@ bch_btree_node_iter_peek_all(struct btree_node_iter *iter,
 }
 
 static inline struct bkey_packed *
-bch_btree_node_iter_peek(struct btree_node_iter *iter, struct btree_keys *b)
+bch_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
 {
 	struct bkey_packed *ret;
 
@@ -605,7 +535,7 @@ bch_btree_node_iter_peek(struct btree_node_iter *iter, struct btree_keys *b)
 }
 
 static inline struct bkey_packed *
-bch_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree_keys *b)
+bch_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
 {
 	struct bkey_packed *ret = bch_btree_node_iter_peek_all(iter, b);
 
@@ -616,9 +546,9 @@ bch_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree_keys *b)
 }
 
 struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *,
-						 struct btree_keys *);
+						 struct btree *);
 struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *,
-					     struct btree_keys *);
+					     struct btree *);
 
 /*
 * Iterates over all _live_ keys - skipping deleted (and potentially
@@ -630,7 +560,7 @@ struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *,
 	     bch_btree_node_iter_advance(iter, b))
 
 struct bkey_s_c bch_btree_node_iter_peek_unpack(struct btree_node_iter *,
-						struct btree_keys *,
+						struct btree *,
 						struct bkey *);
 
 #define for_each_btree_node_key_unpack(b, k, iter, _is_extents, unpacked)\
@@ -670,34 +600,34 @@ struct bset_stats {
 	size_t failed_overflow;
 };
 
-void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
-int bch_bkey_print_bfloat(struct btree_keys *, struct bkey_packed *,
+void bch_btree_keys_stats(struct btree *, struct bset_stats *);
+int bch_bkey_print_bfloat(struct btree *, struct bkey_packed *,
 			  char *, size_t);
 
 /* Debug stuff */
 
-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
-void bch_dump_btree_node(struct btree_keys *);
-void bch_dump_btree_node_iter(struct btree_keys *, struct btree_node_iter *);
+void bch_dump_bset(struct btree *, struct bset *, unsigned);
+void bch_dump_btree_node(struct btree *);
+void bch_dump_btree_node_iter(struct btree *, struct btree_node_iter *);
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-void __bch_verify_btree_nr_keys(struct btree_keys *);
-void bch_btree_node_iter_verify(struct btree_node_iter *, struct btree_keys *);
-void bch_verify_key_order(struct btree_keys *, struct btree_node_iter *,
+void __bch_verify_btree_nr_keys(struct btree *);
+void bch_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
+void bch_verify_key_order(struct btree *, struct btree_node_iter *,
 			  struct bkey_packed *);
 
 #else
 
-static inline void __bch_verify_btree_nr_keys(struct btree_keys *b) {}
+static inline void __bch_verify_btree_nr_keys(struct btree *b) {}
 static inline void bch_btree_node_iter_verify(struct btree_node_iter *iter,
-					      struct btree_keys *b) {}
-static inline void bch_verify_key_order(struct btree_keys *b,
+					      struct btree *b) {}
+static inline void bch_verify_key_order(struct btree *b,
 					struct btree_node_iter *iter,
 					struct bkey_packed *where) {}
 
 #endif
 
-static inline void bch_verify_btree_nr_keys(struct btree_keys *b)
+static inline void bch_verify_btree_nr_keys(struct btree *b)
 {
 	if (btree_keys_expensive_checks(b))
 		__bch_verify_btree_nr_keys(b);
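The sizing helpers retyped in the bset.c hunks above keep their arithmetic: a node of `PAGE_SIZE << page_order` bytes is divided into 128-byte "cachelines", and the auxiliary search tree gets 8 bytes per cacheline. A standalone worked example (4 KiB pages are an assumption for the numbers; the constants come straight from the hunks):

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed for this example */
#define BSET_CACHELINE	128	/* from bset.c */

int main(void)
{
	unsigned page_order = 2;			/* a 16 KiB btree node */
	size_t bytes	  = PAGE_SIZE << page_order;	/* btree_keys_bytes()      */
	size_t cachelines = bytes / BSET_CACHELINE;	/* btree_keys_cachelines() */
	size_t aux_bytes  = cachelines * 8;		/* btree_aux_data_bytes()  */
	size_t aux_u64s	  = aux_bytes / 8;		/* btree_aux_data_u64s()   */

	/* 16384 bytes -> 128 cachelines -> 1024 aux bytes -> 128 u64s */
	printf("%zu %zu %zu %zu\n", bytes, cachelines, aux_bytes, aux_u64s);
	return 0;
}
```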
diff --git a/drivers/md/bcache/btree_cache.c b/drivers/md/bcache/btree_cache.c
index 5c3e2a65a618..98501ccb5505 100644
--- a/drivers/md/bcache/btree_cache.c
+++ b/drivers/md/bcache/btree_cache.c
@@ -39,9 +39,9 @@ static void __mca_data_free(struct btree *b)
 {
 	EBUG_ON(btree_node_write_in_flight(b));
 
-	free_pages((unsigned long) b->data, b->keys.page_order);
+	free_pages((unsigned long) b->data, b->page_order);
 	b->data = NULL;
-	bch_btree_keys_free(&b->keys);
+	bch_btree_keys_free(b);
 }
 
 static void mca_data_free(struct cache_set *c, struct btree *b)
@@ -67,7 +67,7 @@ static void mca_data_alloc(struct cache_set *c, struct btree *b, gfp_t gfp)
 	if (!b->data)
 		goto err;
 
-	if (bch_btree_keys_alloc(&b->keys, order, gfp))
+	if (bch_btree_keys_alloc(b, order, gfp))
 		goto err;
 
 	c->btree_cache_used++;
@@ -99,8 +99,8 @@ void mca_hash_remove(struct cache_set *c, struct btree *b)
 {
 	BUG_ON(btree_node_dirty(b));
 
-	b->keys.nsets = 0;
-	b->keys.set[0].data = NULL;
+	b->nsets = 0;
+	b->set[0].data = NULL;
 
 	rhashtable_remove_fast(&c->btree_cache_table, &b->hash,
 			       bch_btree_cache_params);
@@ -515,13 +515,13 @@ out_unlock:
 out:
 	b->flags	= 0;
 	b->written	= 0;
-	b->keys.nsets	= 0;
-	b->keys.set[0].data = NULL;
+	b->nsets	= 0;
+	b->set[0].data	= NULL;
 	b->sib_u64s[0]	= 0;
 	b->sib_u64s[1]	= 0;
 	b->whiteout_u64s = 0;
 	b->uncompacted_whiteout_u64s = 0;
-	bch_btree_keys_init(&b->keys, &c->expensive_debug_checks);
+	bch_btree_keys_init(b, &c->expensive_debug_checks);
 
 	bch_time_stats_update(&c->mca_alloc_time, start_time);
 
@@ -675,10 +675,10 @@ retry:
 		}
 	}
 
-	prefetch(b->keys.aux_data);
+	prefetch(b->aux_data);
 
-	for_each_bset(&b->keys, t)
-		prefetch((u64 *) b->keys.aux_data + t->aux_data_offset);
+	for_each_bset(b, t)
+		prefetch((u64 *) b->aux_data + t->aux_data_offset);
 
 	/* avoid atomic set bit if it's not needed: */
 	if (btree_node_accessed(b))
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 1def36d3cee9..29ce6fe2e238 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -148,7 +148,7 @@ static bool btree_gc_mark_node(struct cache_set *c, struct btree *b)
 		struct bkey_s_c k;
 		u8 stale = 0;
 
-		for_each_btree_node_key_unpack(&b->keys, k, &iter,
+		for_each_btree_node_key_unpack(b, k, &iter,
 					       btree_node_is_extents(b),
 					       &unpacked) {
 			bkey_debugcheck(c, b, k);
@@ -201,7 +201,7 @@ static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id)
 	for_each_btree_node(&iter, c, btree_id, POS_MIN, depth, b) {
 		btree_node_range_checks(c, b, &r);
 
-		bch_verify_btree_nr_keys(&b->keys);
+		bch_verify_btree_nr_keys(b);
 
 		should_rewrite = btree_gc_mark_node(c, b);
 
@@ -436,14 +436,14 @@ static void recalc_packed_keys(struct btree *b)
 {
 	struct bkey_packed *k;
 
-	memset(&b->keys.nr, 0, sizeof(b->keys.nr));
+	memset(&b->nr, 0, sizeof(b->nr));
 
-	BUG_ON(b->keys.nsets != 1);
+	BUG_ON(b->nsets != 1);
 
-	for (k = b->keys.set[0].data->start;
-	     k != bset_bkey_last(b->keys.set[0].data);
+	for (k = b->set[0].data->start;
+	     k != bset_bkey_last(b->set[0].data);
 	     k = bkey_next(k))
-		btree_keys_account_key_add(&b->keys.nr, 0, k);
+		btree_keys_account_key_add(&b->nr, 0, k);
 }
 
 static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
@@ -465,7 +465,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
 
 	/* Count keys that are not deleted */
 	for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
-		u64s += old_nodes[i]->keys.nr.live_u64s;
+		u64s += old_nodes[i]->nr.live_u64s;
 
 	nr_old_nodes = nr_new_nodes = i;
 
@@ -565,7 +565,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
 		} else if (u64s) {
 			/* move part of n2 into n1 */
 			n1->key.k.p = n1->data->max_key =
-				bkey_unpack_key(&n1->keys, last).p;
+				bkey_unpack_key(n1, last).p;
 
 			n2->data->min_key =
 				btree_type_successor(iter->btree_id,
@@ -853,7 +853,7 @@ static void bch_initial_gc_btree(struct cache_set *c, enum btree_id id)
 			struct bkey unpacked;
 			struct bkey_s_c k;
 
-			for_each_btree_node_key_unpack(&b->keys, k, &node_iter,
+			for_each_btree_node_key_unpack(b, k, &node_iter,
 						       btree_node_is_extents(b),
 						       &unpacked)
 				btree_mark_key(c, b, k);
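`recalc_packed_keys()` above now walks `b->set[0]` key by key. The traversal depends only on each packed key self-reporting its total size in u64 words; a toy model of that walk (stub key layout — an assumption, the real `struct bkey_packed` carries more header fields):

```c
#include <stdint.h>
#include <stdio.h>

/* stub: byte 0 of a key records its whole size, header included, in u64s */
struct bkey_packed { uint8_t u64s; uint8_t pad[7]; };

static struct bkey_packed *bkey_next(struct bkey_packed *k)
{
	return (struct bkey_packed *) ((uint64_t *) k + k->u64s);
}

int main(void)
{
	uint64_t buf[6] = { 0 };	/* keys of 2, 3 and 1 u64s, packed */
	unsigned n = 0;

	((struct bkey_packed *) &buf[0])->u64s = 2;
	((struct bkey_packed *) &buf[2])->u64s = 3;
	((struct bkey_packed *) &buf[5])->u64s = 1;

	for (struct bkey_packed *k = (void *) buf;
	     k != (struct bkey_packed *) (buf + 6);
	     k = bkey_next(k))
		n++;

	printf("%u keys\n", n);	/* 3 */
	return n != 3;
}
```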
diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c
index 97c7e1d325f2..84e5713ad8a7 100644
--- a/drivers/md/bcache/btree_io.c
+++ b/drivers/md/bcache/btree_io.c
@@ -24,8 +24,8 @@ static void verify_no_dups(struct btree *b,
 	struct bkey_packed *k;
 
 	for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
-		struct bkey l = bkey_unpack_key(&b->keys, k);
-		struct bkey r = bkey_unpack_key(&b->keys, bkey_next(k));
+		struct bkey l = bkey_unpack_key(b, k);
+		struct bkey r = bkey_unpack_key(b, bkey_next(k));
 
 		BUG_ON(btree_node_is_extents(b)
 		       ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
@@ -76,12 +76,12 @@ static void *btree_bounce_alloc(struct cache_set *c, unsigned order,
 	return page_address(mempool_alloc(&c->btree_bounce_pool, GFP_NOIO));
 }
 
-typedef int (*sort_cmp_fn)(struct btree_keys *,
+typedef int (*sort_cmp_fn)(struct btree *,
 			   struct bkey_packed *,
 			   struct bkey_packed *);
 
 struct sort_iter {
-	struct btree_keys	*b;
+	struct btree		*b;
 	unsigned		used;
 
 	struct sort_iter_set {
@@ -89,7 +89,7 @@ struct sort_iter {
 	} data[MAX_BSETS + 1];
 };
 
-static void sort_iter_init(struct sort_iter *iter, struct btree_keys *b)
+static void sort_iter_init(struct sort_iter *iter, struct btree *b)
 {
 	memset(iter, 0, sizeof(*iter));
 	iter->b = b;
@@ -162,7 +162,7 @@ static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
 	return ret;
 }
 
-static inline int sort_key_whiteouts_cmp(struct btree_keys *b,
+static inline int sort_key_whiteouts_cmp(struct btree *b,
 					 struct bkey_packed *l,
 					 struct bkey_packed *r)
 {
@@ -184,7 +184,7 @@ static unsigned sort_key_whiteouts(struct bkey_packed *dst,
 	return (u64 *) out - (u64 *) dst;
 }
 
-static inline int sort_extent_whiteouts_cmp(struct btree_keys *b,
+static inline int sort_extent_whiteouts_cmp(struct btree *b,
 					    struct bkey_packed *l,
 					    struct bkey_packed *r)
 {
@@ -266,7 +266,7 @@ static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
 				    bool compacting,
 				    enum compact_mode mode)
 {
-	unsigned live_u64s = b->keys.nr.bset_u64s[t - b->keys.set];
+	unsigned live_u64s = b->nr.bset_u64s[t - b->set];
 	unsigned bset_u64s = le16_to_cpu(t->data->u64s);
 
 	if (live_u64s == bset_u64s)
@@ -287,7 +287,7 @@ static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
 bool __bch_compact_whiteouts(struct cache_set *c, struct btree *b,
 			     enum compact_mode mode)
 {
-	const struct bkey_format *f = &b->keys.format;
+	const struct bkey_format *f = &b->format;
 	struct bset_tree *t;
 	struct bkey_packed *whiteouts = NULL;
 	struct bkey_packed *u_start, *u_pos;
@@ -295,14 +295,14 @@ bool __bch_compact_whiteouts(struct cache_set *c, struct btree *b,
 	unsigned order, whiteout_u64s = 0, u64s;
 	bool used_mempool, compacting = false;
 
-	for_each_bset(&b->keys, t)
+	for_each_bset(b, t)
 		whiteout_u64s += should_compact_bset(b, t,
 					whiteout_u64s != 0, mode);
 
 	if (!whiteout_u64s)
 		return false;
 
-	sort_iter_init(&sort_iter, &b->keys);
+	sort_iter_init(&sort_iter, b);
 
 	whiteout_u64s += b->whiteout_u64s;
 	order = get_order(whiteout_u64s * sizeof(u64));
@@ -316,12 +316,12 @@ bool __bch_compact_whiteouts(struct cache_set *c, struct btree *b,
 
 	sort_iter_add(&sort_iter, u_start, u_pos);
 
-	for_each_bset(&b->keys, t) {
+	for_each_bset(b, t) {
 		struct bset *i = t->data;
 		struct bkey_packed *k, *n, *out, *start, *end;
 		struct btree_node_entry *src = NULL, *dst = NULL;
 
-		if (t != b->keys.set && bset_unwritten(b, i)) {
+		if (t != b->set && bset_unwritten(b, i)) {
 			src = container_of(i, struct btree_node_entry, keys);
 			dst = max(write_block(b),
 				  (void *) bset_bkey_last(t[-1].data));
@@ -377,7 +377,7 @@ bool __bch_compact_whiteouts(struct cache_set *c, struct btree *b,
 
 		if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
 			i->u64s = cpu_to_le16((u64 *) out - i->_data);
-			bch_bset_set_no_aux_tree(&b->keys, t);
+			bch_bset_set_no_aux_tree(b, t);
 		}
 	}
 
@@ -413,7 +413,7 @@ bool __bch_compact_whiteouts(struct cache_set *c, struct btree *b,
 	bch_btree_build_aux_trees(b);
 
 	bch_btree_keys_u64s_remaining(c, b);
-	bch_verify_btree_nr_keys(&b->keys);
+	bch_verify_btree_nr_keys(b);
 
 	return true;
 }
@@ -423,7 +423,7 @@ static bool bch_drop_whiteouts(struct btree *b)
 	struct bset_tree *t;
 	bool ret = false;
 
-	for_each_bset(&b->keys, t) {
+	for_each_bset(b, t) {
 		struct bset *i = t->data;
 		struct bkey_packed *k, *n, *out, *start, *end;
 
@@ -434,7 +434,7 @@ static bool bch_drop_whiteouts(struct btree *b)
 		end = bset_bkey_last(i);
 
 		if (bset_unwritten(b, i) &&
-		    t != b->keys.set) {
+		    t != b->set) {
 			struct bset *dst =
 			       max_t(struct bset *, write_block(b),
 				     (void *) bset_bkey_last(t[-1].data));
@@ -455,16 +455,16 @@ static bool bch_drop_whiteouts(struct btree *b)
 		}
 
 		i->u64s = cpu_to_le16((u64 *) out - i->_data);
-		bch_bset_set_no_aux_tree(&b->keys, t);
+		bch_bset_set_no_aux_tree(b, t);
 		ret = true;
 	}
 
-	bch_verify_btree_nr_keys(&b->keys);
+	bch_verify_btree_nr_keys(b);
 
 	return ret;
 }
 
-static inline int sort_keys_cmp(struct btree_keys *b,
+static inline int sort_keys_cmp(struct btree *b,
 				struct bkey_packed *l,
 				struct bkey_packed *r)
 {
@@ -514,7 +514,7 @@ static unsigned sort_keys(struct bkey_packed *dst,
 	return (u64 *) out - (u64 *) dst;
 }
 
-static inline int sort_extents_cmp(struct btree_keys *b,
+static inline int sort_extents_cmp(struct btree *b,
 				   struct bkey_packed *l,
 				   struct bkey_packed *r)
 {
@@ -558,12 +558,12 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
 	u64 start_time;
 	unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
 	bool sorting_entire_node = start_idx == 0 &&
-		end_idx == b->keys.nsets;
+		end_idx == b->nsets;
 
-	sort_iter_init(&sort_iter, &b->keys);
+	sort_iter_init(&sort_iter, b);
 
-	for (t = b->keys.set + start_idx;
-	     t < b->keys.set + end_idx;
+	for (t = b->set + start_idx;
+	     t < b->set + end_idx;
 	     t++) {
 		u64s += le16_to_cpu(t->data->u64s);
 		sort_iter_add(&sort_iter, t->data->start,
@@ -571,7 +571,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
 	}
 
 	order = sorting_entire_node
-		? b->keys.page_order
+		? b->page_order
 		: get_order(__set_bytes(b->data, u64s));
 
 	out = btree_bounce_alloc(c, order, &used_mempool);
@@ -579,7 +579,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
 	start_time = local_clock();
 
 	if (btree_node_is_extents(b))
-		filter_whiteouts = bset_written(b, b->keys.set[start_idx].data);
+		filter_whiteouts = bset_written(b, b->set[start_idx].data);
 
 	u64s = btree_node_is_extents(b)
 		? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
@@ -594,17 +594,17 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
 	bch_time_stats_update(&c->btree_sort_time, start_time);
 
 	/* Make sure we preserve bset journal_seq: */
-	for (t = b->keys.set + start_idx + 1;
-	     t < b->keys.set + end_idx;
+	for (t = b->set + start_idx + 1;
+	     t < b->set + end_idx;
 	     t++)
-		b->keys.set[start_idx].data->journal_seq =
-			max(b->keys.set[start_idx].data->journal_seq,
+		b->set[start_idx].data->journal_seq =
+			max(b->set[start_idx].data->journal_seq,
 			    t->data->journal_seq);
 
 	if (sorting_entire_node) {
 		unsigned u64s = le16_to_cpu(out->keys.u64s);
 
-		BUG_ON(order != b->keys.page_order);
+		BUG_ON(order != b->page_order);
 
 		/*
		 * Our temporary buffer is the same size as the btree node's
@@ -614,38 +614,38 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
 		*out = *b->data;
 		out->keys.u64s = cpu_to_le16(u64s);
 		swap(out, b->data);
-		b->keys.set->data = &b->data->keys;
+		b->set->data = &b->data->keys;
 	} else {
-		b->keys.set[start_idx].data->u64s = out->keys.u64s;
-		memcpy_u64s(b->keys.set[start_idx].data->start,
+		b->set[start_idx].data->u64s = out->keys.u64s;
+		memcpy_u64s(b->set[start_idx].data->start,
 			    out->keys.start,
 			    le16_to_cpu(out->keys.u64s));
 	}
 
 	for (i = start_idx + 1; i < end_idx; i++)
-		b->keys.nr.bset_u64s[start_idx] +=
-			b->keys.nr.bset_u64s[i];
+		b->nr.bset_u64s[start_idx] +=
+			b->nr.bset_u64s[i];
 
-	b->keys.nsets -= shift;
+	b->nsets -= shift;
 
-	for (i = start_idx + 1; i < b->keys.nsets; i++) {
-		b->keys.nr.bset_u64s[i]	= b->keys.nr.bset_u64s[i + shift];
-		b->keys.set[i]		= b->keys.set[i + shift];
+	for (i = start_idx + 1; i < b->nsets; i++) {
+		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
+		b->set[i]		= b->set[i + shift];
 	}
 
-	for (i = b->keys.nsets; i < MAX_BSETS; i++)
-		b->keys.nr.bset_u64s[i] = 0;
+	for (i = b->nsets; i < MAX_BSETS; i++)
+		b->nr.bset_u64s[i] = 0;
 
-	bch_bset_set_no_aux_tree(&b->keys, &b->keys.set[start_idx]);
+	bch_bset_set_no_aux_tree(b, &b->set[start_idx]);
 
 	btree_bounce_free(c, order, used_mempool, out);
 
-	bch_verify_btree_nr_keys(&b->keys);
+	bch_verify_btree_nr_keys(b);
 }
 
 /* Sort + repack in a new format: */
 static struct btree_nr_keys sort_repack(struct bset *dst,
-					struct btree_keys *src,
+					struct btree *src,
 					struct btree_node_iter *src_iter,
 					struct bkey_format *out_f,
 					bool filter_whiteouts)
@@ -677,7 +677,7 @@ static struct btree_nr_keys sort_repack(struct bset *dst,
 /* Sort, repack, and merge: */
 static struct btree_nr_keys sort_repack_merge(struct cache_set *c,
 					      struct bset *dst,
-					      struct btree_keys *src,
+					      struct btree *src,
 					      struct btree_node_iter *iter,
 					      struct bkey_format *out_f,
 					      bool filter_whiteouts,
@@ -746,35 +746,35 @@ void bch_btree_sort_into(struct cache_set *c,
 	struct btree_node_iter src_iter;
 	u64 start_time = local_clock();
 
-	BUG_ON(dst->keys.nsets != 1);
+	BUG_ON(dst->nsets != 1);
 
-	bch_bset_set_no_aux_tree(&dst->keys, dst->keys.set);
+	bch_bset_set_no_aux_tree(dst, dst->set);
 
-	bch_btree_node_iter_init_from_start(&src_iter, &src->keys,
+	bch_btree_node_iter_init_from_start(&src_iter, src,
 					    btree_node_is_extents(src));
 
 	if (btree_node_ops(src)->key_normalize ||
 	    btree_node_ops(src)->key_merge)
-		nr = sort_repack_merge(c, dst->keys.set->data,
-				&src->keys, &src_iter,
-				&dst->keys.format,
+		nr = sort_repack_merge(c, dst->set->data,
+				src, &src_iter,
+				&dst->format,
 				true,
 				btree_node_ops(src)->key_normalize,
 				btree_node_ops(src)->key_merge);
 	else
-		nr = sort_repack(dst->keys.set->data,
-				&src->keys, &src_iter,
-				&dst->keys.format,
+		nr = sort_repack(dst->set->data,
+				src, &src_iter,
+				&dst->format,
 				true);
 
 	bch_time_stats_update(&c->btree_sort_time, start_time);
 
-	dst->keys.nr.live_u64s		+= nr.live_u64s;
-	dst->keys.nr.bset_u64s[0]	+= nr.bset_u64s[0];
-	dst->keys.nr.packed_keys	+= nr.packed_keys;
-	dst->keys.nr.unpacked_keys	+= nr.unpacked_keys;
+	dst->nr.live_u64s	+= nr.live_u64s;
+	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
+	dst->nr.packed_keys	+= nr.packed_keys;
+	dst->nr.unpacked_keys	+= nr.unpacked_keys;
 
-	bch_verify_btree_nr_keys(&dst->keys);
+	bch_verify_btree_nr_keys(dst);
 }
 
 #define SORT_CRIT	(4096 / sizeof(u64))
@@ -790,14 +790,14 @@ static bool btree_node_compact(struct cache_set *c, struct btree *b,
 	bool ret = false;
 
 	for (unwritten_idx = 0;
-	     unwritten_idx < b->keys.nsets;
+	     unwritten_idx < b->nsets;
 	     unwritten_idx++)
-		if (bset_unwritten(b, b->keys.set[unwritten_idx].data))
+		if (bset_unwritten(b, b->set[unwritten_idx].data))
 			break;
 
-	if (b->keys.nsets - unwritten_idx > 1) {
+	if (b->nsets - unwritten_idx > 1) {
 		btree_node_sort(c, b, iter, unwritten_idx,
-				b->keys.nsets, false);
+				b->nsets, false);
 		ret = true;
 	}
 
@@ -813,10 +813,10 @@ void bch_btree_build_aux_trees(struct btree *b)
 {
 	struct bset_tree *t;
 
-	for_each_bset(&b->keys, t)
-		bch_bset_build_aux_tree(&b->keys, t,
+	for_each_bset(b, t)
+		bch_bset_build_aux_tree(b, t,
 				bset_unwritten(b, t->data) &&
-				t == bset_tree_last(&b->keys));
+				t == bset_tree_last(b));
 }
 
 /*
@@ -841,7 +841,7 @@ void bch_btree_init_next(struct cache_set *c, struct btree *b,
 
 	bne = want_new_bset(c, b);
 	if (bne)
-		bch_bset_init_next(&b->keys, &bne->keys);
+		bch_bset_init_next(b, &bne->keys);
 
 	bch_btree_build_aux_trees(b);
 
@@ -931,9 +931,9 @@ static const char *validate_bset(struct cache_set *c, struct btree *b,
 		}
 
 		if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
-			bch_bkey_swab(btree_node_type(b), &b->keys.format, k);
+			bch_bkey_swab(btree_node_type(b), &b->format, k);
 
-		u = bkey_disassemble(&b->keys, k, &tmp);
+		u = bkey_disassemble(b, k, &tmp);
 
 		invalid = btree_bkey_invalid(c, b, u);
 		if (invalid) {
@@ -958,7 +958,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b,
 
 		if (!seen_non_whiteout &&
 		    (!bkey_whiteout(k) ||
-		     (prev && bkey_cmp_left_packed_byval(&b->keys, prev,
+		     (prev && bkey_cmp_left_packed_byval(b, prev,
 					bkey_start_pos(u.k)) > 0))) {
 			*whiteout_u64s = k->_data - i->_data;
 			seen_non_whiteout = true;
@@ -1040,9 +1040,9 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
 			if (err)
 				goto err;
 
-			b->keys.set->data = &b->data->keys;
+			b->set->data = &b->data->keys;
 
-			btree_node_set_format(&b->keys, b->data->format);
+			btree_node_set_format(b, b->data->format);
 		} else {
 			bne = write_block(b);
 			i = &bne->keys;
@@ -1076,11 +1076,11 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
 		if (ret)
 			continue;
 
-		__bch_btree_node_iter_push(iter, &b->keys,
+		__bch_btree_node_iter_push(iter, b,
 					   i->start,
 					   bkey_idx(i, whiteout_u64s));
 
-		__bch_btree_node_iter_push(iter, &b->keys,
+		__bch_btree_node_iter_push(iter, b,
 					   bkey_idx(i, whiteout_u64s),
 					   bset_bkey_last(i));
 	}
@@ -1095,30 +1095,30 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
 	sorted = btree_bounce_alloc(c, ilog2(btree_pages(c)), &used_mempool);
 	sorted->keys.u64s = 0;
 
-	b->keys.nr = btree_node_is_extents(b)
-		? bch_extent_sort_fix_overlapping(c, &sorted->keys, &b->keys, iter)
-		: bch_key_sort_fix_overlapping(&sorted->keys, &b->keys, iter);
+	b->nr = btree_node_is_extents(b)
+		? bch_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
+		: bch_key_sort_fix_overlapping(&sorted->keys, b, iter);
 
 	u64s = le16_to_cpu(sorted->keys.u64s);
 	*sorted = *b->data;
 	sorted->keys.u64s = cpu_to_le16(u64s);
 	swap(sorted, b->data);
-	b->keys.set->data = &b->data->keys;
-	b->keys.nsets = 1;
+	b->set->data = &b->data->keys;
+	b->nsets = 1;
 
-	BUG_ON(b->keys.nr.live_u64s != u64s);
+	BUG_ON(b->nr.live_u64s != u64s);
 
 	btree_bounce_free(c, ilog2(btree_pages(c)), used_mempool, sorted);
 
-	bch_bset_build_aux_tree(&b->keys, b->keys.set, false);
+	bch_bset_build_aux_tree(b, b->set, false);
 
-	set_needs_whiteout(b->keys.set->data);
+	set_needs_whiteout(b->set->data);
 
 	btree_node_reset_sib_u64s(b);
 
 	err = "short btree key";
-	if (b->keys.set[0].size &&
-	    bkey_cmp_packed(&b->keys, &b->key.k, &b->keys.set[0].end) < 0)
+	if (b->set[0].size &&
+	    bkey_cmp_packed(b, &b->key.k, &b->set[0].end) < 0)
 		goto err;
 
 out:
@@ -1332,8 +1332,7 @@ void __bch_btree_node_write(struct cache_set *c, struct btree *b,
 	BUG_ON(b->written >= c->sb.btree_node_size);
 	BUG_ON(bset_written(b, btree_bset_last(b)));
 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(&c->disk_sb));
-	BUG_ON(memcmp(&b->data->format, &b->keys.format,
-		      sizeof(b->keys.format)));
+	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
 
 	if (lock_type_held == SIX_LOCK_intent) {
 		six_lock_write(&b->lock);
@@ -1345,7 +1344,7 @@ void __bch_btree_node_write(struct cache_set *c, struct btree *b,
 
 	BUG_ON(b->uncompacted_whiteout_u64s);
 
-	sort_iter_init(&sort_iter, &b->keys);
+	sort_iter_init(&sort_iter, b);
 
 	bytes = !b->written
 		? sizeof(struct btree_node)
@@ -1353,7 +1352,7 @@ void __bch_btree_node_write(struct cache_set *c, struct btree *b,
 
 	bytes += b->whiteout_u64s * sizeof(u64);
 
-	for_each_bset(&b->keys, t) {
+	for_each_bset(b, t) {
 		i = t->data;
 
 		if (bset_written(b, i))
@@ -1528,14 +1527,14 @@ bool bch_btree_post_write_cleanup(struct cache_set *c, struct btree *b)
 	 * XXX: decide if we really want to unconditionally sort down to a
 	 * single bset:
	 */
-	if (b->keys.nsets > 1) {
-		btree_node_sort(c, b, NULL, 0, b->keys.nsets, true);
+	if (b->nsets > 1) {
+		btree_node_sort(c, b, NULL, 0, b->nsets, true);
 		invalidated_iter = true;
 	} else {
 		invalidated_iter = bch_drop_whiteouts(b);
 	}
 
-	for_each_bset(&b->keys, t)
+	for_each_bset(b, t)
 		set_needs_whiteout(t->data);
 
 	bch_btree_verify(c, b);
@@ -1548,7 +1547,7 @@ bool bch_btree_post_write_cleanup(struct cache_set *c, struct btree *b)
 
 	bne = want_new_bset(c, b);
 	if (bne)
-		bch_bset_init_next(&b->keys, &bne->keys);
+		bch_bset_init_next(b, &bne->keys);
 
 	bch_btree_build_aux_trees(b);
 
@@ -1652,7 +1651,7 @@ void bch_btree_node_flush_journal_entries(struct cache_set *c,
 					  struct btree *b,
 					  struct closure *cl)
 {
-	int i = b->keys.nsets;
+	int i = b->nsets;
 
 	/*
	 * Journal sequence numbers in the different bsets will always be in
@@ -1661,7 +1660,7 @@ void bch_btree_node_flush_journal_entries(struct cache_set *c,
 	 * need to loop:
	 */
 	while (i--) {
-		u64 seq = le64_to_cpu(b->keys.set[i].data->journal_seq);
+		u64 seq = le64_to_cpu(b->set[i].data->journal_seq);
 
 		if (seq) {
 			bch_journal_flush_seq_async(&c->journal, seq, cl);
@@ -1669,4 +1668,3 @@ void bch_btree_node_flush_journal_entries(struct cache_set *c,
 		}
 	}
 }
-
unsigned live_u64s = b->keys.nr.bset_u64s[t - b->keys.set]; + for_each_bset(b, t) { + unsigned live_u64s = b->nr.bset_u64s[t - b->set]; unsigned bset_u64s = le16_to_cpu(t->data->u64s); if (live_u64s * 4 < bset_u64s * 3) diff --git a/drivers/md/bcache/btree_iter.c b/drivers/md/bcache/btree_iter.c index dc86a36cc1ec..5f71058c0532 100644 --- a/drivers/md/bcache/btree_iter.c +++ b/drivers/md/bcache/btree_iter.c @@ -291,30 +291,30 @@ static void __bch_btree_iter_verify(struct btree_iter *iter, struct btree_node_iter tmp = *node_iter; struct bkey_packed *k; - bch_btree_node_iter_verify(node_iter, &b->keys); + bch_btree_node_iter_verify(node_iter, b); /* * For interior nodes, the iterator will have skipped past * deleted keys: */ k = b->level - ? bch_btree_node_iter_prev(&tmp, &b->keys) - : bch_btree_node_iter_prev_all(&tmp, &b->keys); - if (k && btree_iter_pos_cmp_packed(&b->keys, &iter->pos, k, + ? bch_btree_node_iter_prev(&tmp, b) - : bch_btree_node_iter_prev_all(&tmp, b); + if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k, iter->is_extents)) { char buf[100]; - struct bkey uk = bkey_unpack_key(&b->keys, k); + struct bkey uk = bkey_unpack_key(b, k); bch_bkey_to_text(buf, sizeof(buf), &uk); panic("prev key should be before iter pos:\n%s\n%llu:%llu\n", buf, iter->pos.inode, iter->pos.offset); } - k = bch_btree_node_iter_peek_all(node_iter, &b->keys); - if (k && !btree_iter_pos_cmp_packed(&b->keys, &iter->pos, k, + k = bch_btree_node_iter_peek_all(node_iter, b); + if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k, iter->is_extents)) { char buf[100]; - struct bkey uk = bkey_unpack_key(&b->keys, k); + struct bkey uk = bkey_unpack_key(b, k); bch_bkey_to_text(buf, sizeof(buf), &uk); panic("next key should be before iter pos:\n%llu:%llu\n%s\n", @@ -345,9 +345,9 @@ static void __bch_btree_node_iter_fix(struct btree_iter *iter, { const struct bkey_packed *end = bset_bkey_last(t->data); struct btree_node_iter_set *set; - unsigned offset = __btree_node_key_to_offset(&b->keys, where); + unsigned offset = __btree_node_key_to_offset(b, where); int shift = new_u64s - clobber_u64s; - unsigned old_end = (int) __btree_node_key_to_offset(&b->keys, end) - shift; + unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift; btree_node_iter_for_each(node_iter, set) if (set->end == old_end) @@ -355,9 +355,9 @@ static void __bch_btree_node_iter_fix(struct btree_iter *iter, /* didn't find the bset in the iterator - might have to readd it: */ if (new_u64s && - btree_iter_pos_cmp_packed(&b->keys, &iter->pos, where, + btree_iter_pos_cmp_packed(b, &iter->pos, where, iter->is_extents)) - bch_btree_node_iter_push(node_iter, &b->keys, where, end); + bch_btree_node_iter_push(node_iter, b, where, end); return; found: set->end = (int) set->end + shift; @@ -367,15 +367,15 @@ found: return; if (new_u64s && - btree_iter_pos_cmp_packed(&b->keys, &iter->pos, where, + btree_iter_pos_cmp_packed(b, &iter->pos, where, iter->is_extents)) { set->k = offset; - bch_btree_node_iter_sort(node_iter, &b->keys); + bch_btree_node_iter_sort(node_iter, b); } else if (set->k < offset + clobber_u64s) { set->k = offset + new_u64s; if (set->k == set->end) *set = node_iter->data[--node_iter->used]; - bch_btree_node_iter_sort(node_iter, &b->keys); + bch_btree_node_iter_sort(node_iter, b); } else { set->k = (int) set->k + shift; } @@ -403,33 +403,33 @@ found: * to.
*/ if (b->level && new_u64s && !bkey_deleted(where) && - btree_iter_pos_cmp_packed(&b->keys, &iter->pos, where, + btree_iter_pos_cmp_packed(b, &iter->pos, where, iter->is_extents)) { struct bset_tree *t; struct bkey_packed *k; - for_each_bset(&b->keys, t) { - if (bch_bkey_to_bset(&b->keys, where) == t) + for_each_bset(b, t) { + if (bch_bkey_to_bset(b, where) == t) continue; - k = bkey_prev_all(&b->keys, t, + k = bkey_prev_all(b, t, bch_btree_node_iter_bset_pos(node_iter, - &b->keys, t->data)); + b, t->data)); if (k && - __btree_node_iter_cmp(node_iter, &b->keys, + __btree_node_iter_cmp(node_iter, b, k, where) > 0) { struct btree_node_iter_set *set; unsigned offset = - __btree_node_key_to_offset(&b->keys, bkey_next(k)); + __btree_node_key_to_offset(b, bkey_next(k)); btree_node_iter_for_each(node_iter, set) if (set->k == offset) { - set->k = __btree_node_key_to_offset(&b->keys, k); - bch_btree_node_iter_sort(node_iter, &b->keys); + set->k = __btree_node_key_to_offset(b, k); + bch_btree_node_iter_sort(node_iter, b); goto next_bset; } - bch_btree_node_iter_push(node_iter, &b->keys, k, + bch_btree_node_iter_push(node_iter, b, k, bset_bkey_last(t->data)); } next_bset: @@ -470,7 +470,7 @@ void bch_btree_node_iter_fix(struct btree_iter *iter, /* peek_all() doesn't skip deleted keys */ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter) { - struct btree_keys *b = &iter->nodes[iter->level]->keys; + struct btree *b = iter->nodes[iter->level]; struct bkey_packed *k = bch_btree_node_iter_peek_all(&iter->node_iters[iter->level], b); struct bkey_s_c ret; @@ -483,14 +483,14 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter) ret = bkey_disassemble(b, k, &iter->k); if (debug_check_bkeys(iter->c)) - bkey_debugcheck(iter->c, iter->nodes[iter->level], ret); + bkey_debugcheck(iter->c, b, ret); return ret; } static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter) { - struct btree_keys *b = &iter->nodes[iter->level]->keys; + struct btree *b = iter->nodes[iter->level]; struct bkey_packed *k = bch_btree_node_iter_peek(&iter->node_iters[iter->level], b); struct bkey_s_c ret; @@ -503,7 +503,7 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter) ret = bkey_disassemble(b, k, &iter->k); if (debug_check_bkeys(iter->c)) - bkey_debugcheck(iter->c, iter->nodes[iter->level], ret); + bkey_debugcheck(iter->c, b, ret); return ret; } @@ -511,7 +511,7 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter) static inline void __btree_iter_advance(struct btree_iter *iter) { bch_btree_node_iter_advance(&iter->node_iters[iter->level], - &iter->nodes[iter->level]->keys); + iter->nodes[iter->level]); } /* @@ -532,13 +532,13 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b) return; k = bch_btree_node_iter_peek_all(&iter->node_iters[b->level + 1], - &iter->nodes[b->level + 1]->keys); + iter->nodes[b->level + 1]); if (!k || bkey_deleted(k) || - bkey_cmp_left_packed(&iter->nodes[b->level + 1]->keys, + bkey_cmp_left_packed(iter->nodes[b->level + 1], k, &b->key.k.p)) { char buf[100]; - struct bkey uk = bkey_unpack_key(&b->keys, k); + struct bkey uk = bkey_unpack_key(b, k); bch_bkey_to_text(buf, sizeof(buf), &uk); panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n", @@ -552,13 +552,13 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b) static inline void __btree_iter_init(struct btree_iter *iter, struct btree *b) { - 
bch_btree_node_iter_init(&iter->node_iters[b->level], &b->keys, + bch_btree_node_iter_init(&iter->node_iters[b->level], b, iter->pos, iter->is_extents, btree_node_is_extents(b)); /* Skip to first non whiteout: */ if (b->level) - bch_btree_node_iter_peek(&iter->node_iters[b->level], &b->keys); + bch_btree_node_iter_peek(&iter->node_iters[b->level], b); } static inline void btree_iter_node_set(struct btree_iter *iter, @@ -964,14 +964,14 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth) void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos) { - struct btree_keys *b = &iter->nodes[0]->keys; + struct btree *b = iter->nodes[0]; struct btree_node_iter *node_iter = &iter->node_iters[0]; struct bkey_packed *k; EBUG_ON(iter->level != 0); EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); EBUG_ON(!btree_node_locked(iter, 0)); - EBUG_ON(bkey_cmp(new_pos, iter->nodes[0]->key.k.p) > 0); + EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0); while ((k = bch_btree_node_iter_peek_all(node_iter, b)) && !btree_iter_pos_cmp_packed(b, &new_pos, k, @@ -979,8 +979,7 @@ void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_p bch_btree_node_iter_advance(node_iter, b); if (!k && - !btree_iter_pos_cmp(new_pos, &iter->nodes[0]->key.k, - iter->is_extents)) + !btree_iter_pos_cmp(new_pos, &b->key.k, iter->is_extents)) iter->at_end_of_leaf = true; iter->pos = new_pos; diff --git a/drivers/md/bcache/btree_types.h b/drivers/md/bcache/btree_types.h index 0d711ec91c20..a8dd798e08fb 100644 --- a/drivers/md/bcache/btree_types.h +++ b/drivers/md/bcache/btree_types.h @@ -8,7 +8,6 @@ #include <linux/workqueue.h> #include "bkey_methods.h" -#include "bset.h" #include "journal_types.h" #include "six.h" @@ -16,20 +15,48 @@ struct cache_set; struct open_bucket; struct btree_interior_update; -struct btree_write { - struct journal_entry_pin journal; - struct closure_waitlist wait; +#define MAX_BSETS 3U + +struct btree_nr_keys { + + /* + * Amount of live metadata (i.e. size of node after a compaction) in + * units of u64s + */ + u16 live_u64s; + u16 bset_u64s[MAX_BSETS]; + + /* live keys only: */ + u16 packed_keys; + u16 unpacked_keys; }; -struct btree_root { - struct btree *b; +struct bset_tree { + /* + * We construct a binary tree in an array as if the array + * started at 1, so that things line up on the same cachelines + * better: see comments in bset.c at cacheline_to_bkey() for + * details + */ - struct btree_interior_update *as; + /* size of the binary tree and prev array */ + u16 size; - /* On disk root - see async splits: */ - __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX); - u8 level; - u8 alive; + /* function of size - precalculated for to_inorder() */ + u16 extra; + + u16 aux_data_offset; + + /* copy of the last key in the set */ + struct bkey_packed end; + + /* The actual btree node, with pointers to each sorted set */ + struct bset *data; +}; + +struct btree_write { + struct journal_entry_pin journal; + struct closure_waitlist wait; }; struct btree { @@ -49,8 +76,26 @@ struct btree { u16 whiteout_u64s; u16 uncompacted_whiteout_u64s; - struct btree_keys keys; + u8 nsets; + u8 page_order; + u8 nr_key_bits; + u8 unpack_fn_len; + + struct btree_nr_keys nr; + + struct bkey_format format; + struct btree_node *data; + void *aux_data; + + /* + * Sets of sorted keys - the real btree node - plus a binary search tree + * + * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point + * to the memory we have allocated for this btree node. 
Additionally, + * set[0]->data points to the entire btree node as it exists on disk. + */ + struct bset_tree set[MAX_BSETS]; /* * XXX: add a delete sequence number, so when btree_node_relock() fails @@ -76,6 +121,10 @@ struct btree { struct list_head list; struct btree_write writes[2]; + +#ifdef CONFIG_BCACHE_DEBUG + bool *expensive_debug_checks; +#endif }; #define BTREE_FLAG(flag) \ @@ -118,12 +167,18 @@ static inline struct btree_write *btree_prev_write(struct btree *b) static inline struct bset *btree_bset_first(struct btree *b) { - return b->keys.set->data; + return b->set->data; +} + +static inline struct bset_tree *bset_tree_last(struct btree *b) +{ + EBUG_ON(!b->nsets); + return b->set + b->nsets - 1; } static inline struct bset *btree_bset_last(struct btree *b) { - return bset_tree_last(&b->keys)->data; + return bset_tree_last(b)->data; } static inline unsigned bset_byte_offset(struct btree *b, void *i) @@ -152,6 +207,17 @@ static inline bool btree_node_is_extents(struct btree *b) return btree_node_type(b) == BKEY_TYPE_EXTENTS; } +struct btree_root { + struct btree *b; + + struct btree_interior_update *as; + + /* On disk root - see async splits: */ + __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX); + u8 level; + u8 alive; +}; + /* * Optional hook that will be called just prior to a btree node update, when * we're holding the write lock and we know what key is about to be overwritten: @@ -159,6 +225,7 @@ static inline bool btree_node_is_extents(struct btree *b) struct btree_iter; struct bucket_stats_cache_set; +struct btree_node_iter; enum extent_insert_hook_ret { BTREE_HOOK_DO_INSERT, @@ -192,7 +259,7 @@ enum btree_gc_coalesce_fail_reason { }; typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *, - struct btree_keys *, + struct btree *, struct btree_node_iter *); #endif /* _BCACHE_BTREE_TYPES_H */ diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c index 944afe9558ac..246568b53056 100644 --- a/drivers/md/bcache/btree_update.c +++ b/drivers/md/bcache/btree_update.c @@ -30,12 +30,12 @@ void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b) struct bset_tree *t; struct bkey uk; - for_each_bset(&b->keys, t) + for_each_bset(b, t) for (k = t->data->start; k != bset_bkey_last(t->data); k = bkey_next(k)) if (!bkey_whiteout(k)) { - uk = bkey_unpack_key(&b->keys, k); + uk = bkey_unpack_key(b, k); bch_bkey_format_add_key(s, &uk); } } @@ -53,18 +53,18 @@ static struct bkey_format bch_btree_calc_format(struct btree *b) static size_t btree_node_u64s_with_format(struct btree *b, struct bkey_format *new_f) { - struct bkey_format *old_f = &b->keys.format; + struct bkey_format *old_f = &b->format; /* stupid integer promotion rules */ ssize_t delta = (((int) new_f->key_u64s - old_f->key_u64s) * - (int) b->keys.nr.packed_keys) + + (int) b->nr.packed_keys) + (((int) new_f->key_u64s - BKEY_U64s) * - (int) b->keys.nr.unpacked_keys); + (int) b->nr.unpacked_keys); - BUG_ON(delta + b->keys.nr.live_u64s < 0); + BUG_ON(delta + b->nr.live_u64s < 0); - return b->keys.nr.live_u64s + delta; + return b->nr.live_u64s + delta; } /** @@ -78,7 +78,7 @@ bool bch_btree_node_format_fits(struct btree *b, struct bkey_format *new_f) size_t u64s = btree_node_u64s_with_format(b, new_f); return __set_bytes(b->data, u64s) < - PAGE_SIZE << b->keys.page_order; + PAGE_SIZE << b->page_order; } /* Btree node freeing/allocation: */ @@ -294,8 +294,8 @@ static struct btree *bch_btree_node_alloc(struct cache_set *c, set_btree_node_accessed(b); set_btree_node_dirty(b); - 
bch_bset_init_first(&b->keys, &b->data->keys); - memset(&b->keys.nr, 0, sizeof(b->keys.nr)); + bch_bset_init_first(b, &b->data->keys); + memset(&b->nr, 0, sizeof(b->nr)); b->data->magic = cpu_to_le64(bset_magic(&c->disk_sb)); SET_BSET_BTREE_LEVEL(&b->data->keys, level); @@ -320,7 +320,7 @@ struct btree *__btree_node_alloc_replacement(struct cache_set *c, n->data->max_key = b->data->max_key; n->data->format = format; - btree_node_set_format(&n->keys, format); + btree_node_set_format(n, format); bch_btree_sort_into(c, n, b); @@ -343,7 +343,7 @@ struct btree *btree_node_alloc_replacement(struct cache_set *c, * the btree node anymore, use the old format for now: */ if (!bch_btree_node_format_fits(b, &new_f)) - new_f = b->keys.format; + new_f = b->format; return __btree_node_alloc_replacement(c, b, new_f, reserve); } @@ -468,7 +468,7 @@ static struct btree *__btree_root_alloc(struct cache_set *c, unsigned level, b->data->format = bch_btree_calc_format(b); b->key.k.p = POS_MAX; - btree_node_set_format(&b->keys, b->data->format); + btree_node_set_format(b, b->data->format); bch_btree_build_aux_trees(b); six_unlock_write(&b->lock); @@ -636,17 +636,17 @@ static void bch_insert_fixup_btree_ptr(struct btree_iter *iter, c->sb.btree_node_size, true, gc_pos_btree_node(b), &stats); - while ((k = bch_btree_node_iter_peek_all(node_iter, &b->keys)) && - !btree_iter_pos_cmp_packed(&b->keys, &insert->k.p, k, false)) - bch_btree_node_iter_advance(node_iter, &b->keys); + while ((k = bch_btree_node_iter_peek_all(node_iter, b)) && + !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false)) + bch_btree_node_iter_advance(node_iter, b); /* * If we're overwriting, look up pending delete and mark so that gc * marks it on the pending delete list: */ - if (k && !bkey_cmp_packed(&b->keys, k, &insert->k)) + if (k && !bkey_cmp_packed(b, k, &insert->k)) bch_btree_node_free_index(c, b, iter->btree_id, - bkey_disassemble(&b->keys, k, &tmp), + bkey_disassemble(b, k, &tmp), &stats); bch_cache_set_stats_apply(c, &stats, disk_res, gc_pos_btree_node(b)); @@ -663,7 +663,7 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, struct btree_node_iter *node_iter, struct bkey_i *insert) { - const struct bkey_format *f = &b->keys.format; + const struct bkey_format *f = &b->format; struct bkey_packed *k; struct bset_tree *t; unsigned clobber_u64s; @@ -675,11 +675,11 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, bkey_cmp(insert->k.p, b->data->max_key) > 0); BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b)); - k = bch_btree_node_iter_peek_all(node_iter, &b->keys); - if (k && !bkey_cmp_packed(&b->keys, k, &insert->k)) { + k = bch_btree_node_iter_peek_all(node_iter, b); + if (k && !bkey_cmp_packed(b, k, &insert->k)) { BUG_ON(bkey_whiteout(k)); - t = bch_bkey_to_bset(&b->keys, k); + t = bch_bkey_to_bset(b, k); if (bset_unwritten(b, t->data) && bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) { @@ -693,9 +693,9 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, insert->k.needs_whiteout = k->needs_whiteout; - btree_keys_account_key_drop(&b->keys.nr, t - b->keys.set, k); + btree_keys_account_key_drop(&b->nr, t - b->set, k); - if (t == bset_tree_last(&b->keys)) { + if (t == bset_tree_last(b)) { clobber_u64s = k->u64s; /* @@ -704,7 +704,7 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, * been written to disk) - just delete it: */ if (bkey_whiteout(&insert->k) && !k->needs_whiteout) { - bch_bset_delete(&b->keys, k, clobber_u64s); + bch_bset_delete(b, k, clobber_u64s); bch_btree_node_iter_fix(iter, 
b, node_iter, t, k, clobber_u64s, 0); return true; @@ -733,11 +733,11 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, insert->k.needs_whiteout = false; } - t = bset_tree_last(&b->keys); - k = bch_btree_node_iter_bset_pos(node_iter, &b->keys, t->data); + t = bset_tree_last(b); + k = bch_btree_node_iter_bset_pos(node_iter, b, t->data); clobber_u64s = 0; overwrite: - bch_bset_insert(&b->keys, node_iter, k, insert, clobber_u64s); + bch_bset_insert(b, node_iter, k, insert, clobber_u64s); if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k)) bch_btree_node_iter_fix(iter, b, node_iter, t, k, clobber_u64s, k->u64s); @@ -1093,7 +1093,7 @@ void bch_btree_interior_update_will_free_node(struct cache_set *c, * over the bset->journal_seq tracking, since we'll be mixing those keys * in with keys that aren't in the journal anymore: */ - for_each_bset(&b->keys, t) + for_each_bset(b, t) as->journal_seq = max(as->journal_seq, t->data->journal_seq); /* @@ -1151,33 +1151,33 @@ static void btree_node_interior_verify(struct btree *b) BUG_ON(!b->level); - bch_btree_node_iter_init(&iter, &b->keys, b->key.k.p, false, false); + bch_btree_node_iter_init(&iter, b, b->key.k.p, false, false); #if 1 - BUG_ON(!(k = bch_btree_node_iter_peek(&iter, &b->keys)) || - bkey_cmp_left_packed(&b->keys, k, &b->key.k.p)); + BUG_ON(!(k = bch_btree_node_iter_peek(&iter, b)) || + bkey_cmp_left_packed(b, k, &b->key.k.p)); - BUG_ON((bch_btree_node_iter_advance(&iter, &b->keys), + BUG_ON((bch_btree_node_iter_advance(&iter, b), !bch_btree_node_iter_end(&iter))); #else const char *msg; msg = "not found"; - k = bch_btree_node_iter_peek(&iter, &b->keys); + k = bch_btree_node_iter_peek(&iter, b); if (!k) goto err; msg = "isn't what it should be"; - if (bkey_cmp_left_packed(&b->keys, k, &b->key.k.p)) + if (bkey_cmp_left_packed(b, k, &b->key.k.p)) goto err; - bch_btree_node_iter_advance(&iter, &b->keys); + bch_btree_node_iter_advance(&iter, b); msg = "isn't last key"; if (!bch_btree_node_iter_end(&iter)) goto err; return; err: - bch_dump_btree_node(&b->keys); + bch_dump_btree_node(b); printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode, b->key.k.p.offset, msg); BUG(); @@ -1218,8 +1218,8 @@ bch_btree_insert_keys_interior(struct btree *b, * the iterator's current position - they know the keys go in * the node the iterator points to: */ - while ((k = bch_btree_node_iter_prev_all(&node_iter, &b->keys)) && - (bkey_cmp_packed(&b->keys, k, &insert->k) >= 0)) + while ((k = bch_btree_node_iter_prev_all(&node_iter, b)) && + (bkey_cmp_packed(b, k, &insert->k) >= 0)) ; while (!bch_keylist_empty(insert_keys)) { @@ -1234,8 +1234,8 @@ bch_btree_insert_keys_interior(struct btree *b, for_each_linked_btree_node(iter, b, linked) bch_btree_node_iter_peek(&linked->node_iters[b->level], - &b->keys); - bch_btree_node_iter_peek(&iter->node_iters[b->level], &b->keys); + b); + bch_btree_node_iter_peek(&iter->node_iters[b->level], b); bch_btree_iter_verify(iter, b); @@ -1262,10 +1262,10 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n n2 = bch_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve); n2->data->max_key = n1->data->max_key; - n2->data->format = n1->keys.format; + n2->data->format = n1->format; n2->key.k.p = n1->key.k.p; - btree_node_set_format(&n2->keys, n2->data->format); + btree_node_set_format(n2, n2->data->format); set1 = btree_bset_first(n1); set2 = btree_bset_first(n2); @@ -1292,7 +1292,7 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n BUG_ON(!prev); - 
n1->key.k.p = bkey_unpack_key(&n1->keys, prev).p; + n1->key.k.p = bkey_unpack_key(n1, prev).p; n1->data->max_key = n1->key.k.p; n2->data->min_key = btree_type_successor(n1->btree_id, n1->key.k.p); @@ -1300,17 +1300,15 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n set2->u64s = cpu_to_le16((u64 *) bset_bkey_last(set1) - (u64 *) k); set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s)); - n2->keys.nr.live_u64s = le16_to_cpu(set2->u64s); - n2->keys.nr.bset_u64s[0] = le16_to_cpu(set2->u64s); - n2->keys.nr.packed_keys - = n1->keys.nr.packed_keys - nr_packed; - n2->keys.nr.unpacked_keys - = n1->keys.nr.unpacked_keys - nr_unpacked; + n2->nr.live_u64s = le16_to_cpu(set2->u64s); + n2->nr.bset_u64s[0] = le16_to_cpu(set2->u64s); + n2->nr.packed_keys = n1->nr.packed_keys - nr_packed; + n2->nr.unpacked_keys = n1->nr.unpacked_keys - nr_unpacked; - n1->keys.nr.live_u64s = le16_to_cpu(set1->u64s); - n1->keys.nr.bset_u64s[0] = le16_to_cpu(set1->u64s); - n1->keys.nr.packed_keys = nr_packed; - n1->keys.nr.unpacked_keys = nr_unpacked; + n1->nr.live_u64s = le16_to_cpu(set1->u64s); + n1->nr.bset_u64s[0] = le16_to_cpu(set1->u64s); + n1->nr.packed_keys = nr_packed; + n1->nr.unpacked_keys = nr_unpacked; BUG_ON(!set1->u64s); BUG_ON(!set2->u64s); @@ -1322,8 +1320,8 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n btree_node_reset_sib_u64s(n1); btree_node_reset_sib_u64s(n2); - bch_verify_btree_nr_keys(&n1->keys); - bch_verify_btree_nr_keys(&n2->keys); + bch_verify_btree_nr_keys(n1); + bch_verify_btree_nr_keys(n2); if (n1->level) { btree_node_interior_verify(n1); @@ -1355,7 +1353,7 @@ static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b, BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE); - bch_btree_node_iter_init(&node_iter, &b->keys, k->k.p, false, false); + bch_btree_node_iter_init(&node_iter, b, k->k.p, false, false); while (!bch_keylist_empty(keys)) { k = bch_keylist_front(keys); @@ -1385,8 +1383,8 @@ static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b, } else p = bkey_next(p); - BUG_ON(b->keys.nsets != 1 || - b->keys.nr.live_u64s != le16_to_cpu(b->keys.set->data->u64s)); + BUG_ON(b->nsets != 1 || + b->nr.live_u64s != le16_to_cpu(b->set->data->u64s)); btree_node_interior_verify(b); } @@ -1413,7 +1411,7 @@ static void btree_split(struct btree *b, struct btree_iter *iter, if (__set_blocks(n1->data, le16_to_cpu(n1->data->keys.u64s), block_bytes(c)) > BTREE_SPLIT_THRESHOLD(c)) { - trace_bcache_btree_node_split(c, b, b->keys.nr.live_u64s); + trace_bcache_btree_node_split(c, b, b->nr.live_u64s); n2 = __btree_split_node(iter, n1, reserve); @@ -1445,7 +1443,7 @@ static void btree_split(struct btree *b, struct btree_iter *iter, bch_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent, -1); } } else { - trace_bcache_btree_node_compact(c, b, b->keys.nr.live_u64s); + trace_bcache_btree_node_compact(c, b, b->nr.live_u64s); bch_btree_build_aux_trees(n1); six_unlock_write(&n1->lock); @@ -1605,19 +1603,19 @@ static struct btree *btree_node_get_sibling(struct btree_iter *iter, node_iter = iter->node_iters[parent->level]; - k = bch_btree_node_iter_peek_all(&node_iter, &parent->keys); - BUG_ON(bkey_cmp_left_packed(&parent->keys, k, &b->key.k.p)); + k = bch_btree_node_iter_peek_all(&node_iter, parent); + BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p)); do { k = sib == btree_prev_sib - ? 
bch_btree_node_iter_prev_all(&node_iter, &parent->keys) - : (bch_btree_node_iter_advance(&node_iter, &parent->keys), - bch_btree_node_iter_peek_all(&node_iter, &parent->keys)); + ? bch_btree_node_iter_prev_all(&node_iter, parent) + : (bch_btree_node_iter_advance(&node_iter, parent), + bch_btree_node_iter_peek_all(&node_iter, parent)); if (!k) return NULL; } while (bkey_deleted(k)); - bkey_unpack(&parent->keys, &tmp.k, k); + bkey_unpack(parent, &tmp.k, k); ret = bch_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent); @@ -1741,7 +1739,7 @@ retry: n->data->format = new_f; n->key.k.p = next->key.k.p; - btree_node_set_format(&n->keys, new_f); + btree_node_set_format(n, new_f); bch_btree_sort_into(c, n, prev); bch_btree_sort_into(c, n, next); @@ -1816,14 +1814,14 @@ btree_insert_key(struct btree_insert *trans, struct btree *b = iter->nodes[0]; enum btree_insert_ret ret; int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s); - int old_live_u64s = b->keys.nr.live_u64s; + int old_live_u64s = b->nr.live_u64s; int live_u64s_added, u64s_added; ret = !btree_node_is_extents(b) ? bch_insert_fixup_key(trans, insert) : bch_insert_fixup_extent(trans, insert); - live_u64s_added = (int) b->keys.nr.live_u64s - old_live_u64s; + live_u64s_added = (int) b->nr.live_u64s - old_live_u64s; u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s; if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0) diff --git a/drivers/md/bcache/btree_update.h b/drivers/md/bcache/btree_update.h index 65b0421b6b9c..2b83906ec359 100644 --- a/drivers/md/bcache/btree_update.h +++ b/drivers/md/bcache/btree_update.h @@ -20,8 +20,8 @@ struct btree; static inline void btree_node_reset_sib_u64s(struct btree *b) { - b->sib_u64s[0] = b->keys.nr.live_u64s; - b->sib_u64s[1] = b->keys.nr.live_u64s; + b->sib_u64s[0] = b->nr.live_u64s; + b->sib_u64s[1] = b->nr.live_u64s; } struct btree_reserve { @@ -279,9 +279,9 @@ static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t, { if (bset_written(b, t->data)) { EBUG_ON(b->uncompacted_whiteout_u64s < - bkeyp_key_u64s(&b->keys.format, k)); + bkeyp_key_u64s(&b->format, k)); b->uncompacted_whiteout_u64s -= - bkeyp_key_u64s(&b->keys.format, k); + bkeyp_key_u64s(&b->format, k); } } @@ -291,7 +291,7 @@ static inline void reserve_whiteout(struct btree *b, struct bset_tree *t, if (bset_written(b, t->data)) { BUG_ON(!k->needs_whiteout); b->uncompacted_whiteout_u64s += - bkeyp_key_u64s(&b->keys.format, k); + bkeyp_key_u64s(&b->format, k); } } diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 18f8aaf26010..1be2e607228c 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -59,7 +59,7 @@ void __bch_btree_verify(struct cache_set *c, struct btree *b) v->written = 0; v->level = b->level; v->btree_id = b->btree_id; - bch_btree_keys_init(&v->keys, &c->expensive_debug_checks); + bch_btree_keys_init(v, &c->expensive_debug_checks); pick = bch_btree_pick_ptr(c, b); if (IS_ERR_OR_NULL(pick.ca)) @@ -101,10 +101,10 @@ void __bch_btree_verify(struct cache_set *c, struct btree *b) console_lock(); printk(KERN_ERR "*** in memory:\n"); - bch_dump_bset(&b->keys, inmemory, 0); + bch_dump_bset(b, inmemory, 0); printk(KERN_ERR "*** read back in:\n"); - bch_dump_bset(&v->keys, sorted, 0); + bch_dump_bset(v, sorted, 0); while (offset < b->written) { if (!offset ) { @@ -125,7 +125,7 @@ void __bch_btree_verify(struct cache_set *c, struct btree *b) } printk(KERN_ERR "*** on disk block %u:\n", offset); - bch_dump_bset(&b->keys, i, offset); + bch_dump_bset(b, i, offset); 
offset += sectors; } @@ -298,12 +298,12 @@ static const struct file_operations btree_debug_ops = { static int print_btree_node(struct dump_iter *i, struct btree *b) { - const struct bkey_format *f = &b->keys.format; + const struct bkey_format *f = &b->format; struct bset_stats stats; memset(&stats, 0, sizeof(stats)); - bch_btree_keys_stats(&b->keys, &stats); + bch_btree_keys_stats(b, &stats); i->bytes = scnprintf(i->buf, sizeof(i->buf), "l %u %llu:%llu - %llu:%llu:\n" @@ -328,15 +328,15 @@ static int print_btree_node(struct dump_iter *i, struct btree *b) f->bits_per_field[2], f->bits_per_field[3], f->bits_per_field[4], - b->keys.unpack_fn_len, - b->keys.nr.live_u64s * sizeof(u64), + b->unpack_fn_len, + b->nr.live_u64s * sizeof(u64), btree_bytes(i->c) - sizeof(struct btree_node), - b->keys.nr.live_u64s * 100 / btree_max_u64s(i->c), + b->nr.live_u64s * 100 / btree_max_u64s(i->c), b->sib_u64s[0], b->sib_u64s[1], BTREE_FOREGROUND_MERGE_THRESHOLD(i->c), - b->keys.nr.packed_keys, - b->keys.nr.unpacked_keys, + b->nr.packed_keys, + b->nr.unpacked_keys, stats.floats, stats.failed_unpacked, stats.failed_prev, @@ -416,7 +416,7 @@ static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf, while ((k = bch_btree_iter_peek(&iter)).k && !(err = btree_iter_err(k))) { - struct btree_keys *b = &iter.nodes[0]->keys; + struct btree *b = iter.nodes[0]; struct btree_node_iter *node_iter = &iter.node_iters[0]; struct bkey_packed *_k = bch_btree_node_iter_peek(node_iter, b); diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index ec11b9f7c841..3f2d8df90904 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -22,11 +22,11 @@ #include <trace/events/bcache.h> static bool __bch_extent_normalize(struct cache_set *, struct bkey_s, bool); -static enum merge_result bch_extent_merge(struct cache_set *, struct btree_keys *, +static enum merge_result bch_extent_merge(struct cache_set *, struct btree *, struct bkey_i *, struct bkey_i *); static void sort_key_next(struct btree_node_iter *iter, - struct btree_keys *b, + struct btree *b, struct btree_node_iter_set *i) { i->k += __btree_node_offset_to_key(b, i->k)->u64s; @@ -52,7 +52,7 @@ static void sort_key_next(struct btree_node_iter *iter, }) static inline bool should_drop_next_key(struct btree_node_iter *iter, - struct btree_keys *b) + struct btree *b) { struct btree_node_iter_set *l = iter->data, *r = iter->data + 1; struct bkey_packed *k = __btree_node_offset_to_key(b, l->k); @@ -78,7 +78,7 @@ static inline bool should_drop_next_key(struct btree_node_iter *iter, } struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst, - struct btree_keys *b, + struct btree *b, struct btree_node_iter *iter) { struct bkey_packed *out = dst->start; @@ -317,7 +317,7 @@ static void bch_extent_drop_stale(struct cache_set *c, struct bkey_s_extent e) bch_extent_drop_redundant_crcs(e); } -static bool bch_ptr_normalize(struct cache_set *c, struct btree_keys *bk, +static bool bch_ptr_normalize(struct cache_set *c, struct btree *bk, struct bkey_s k) { return __bch_extent_normalize(c, k, false); @@ -671,7 +671,7 @@ void bch_key_resize(struct bkey *k, * that we have to unpack the key, modify the unpacked key - then this * copies/repacks the unpacked to the original as necessary. 
*/ -static bool __extent_save(struct btree_keys *b, struct btree_node_iter *iter, +static bool __extent_save(struct btree *b, struct btree_node_iter *iter, struct bkey_packed *dst, struct bkey *src) { struct bkey_format *f = &b->format; @@ -691,7 +691,7 @@ static bool __extent_save(struct btree_keys *b, struct btree_node_iter *iter, return ret; } -static void extent_save(struct btree_keys *b, struct btree_node_iter *iter, +static void extent_save(struct btree *b, struct btree_node_iter *iter, struct bkey_packed *dst, struct bkey *src) { BUG_ON(!__extent_save(b, iter, dst, src)); @@ -716,13 +716,13 @@ static void extent_save(struct btree_keys *b, struct btree_node_iter *iter, }) static inline void extent_sort_sift(struct btree_node_iter *iter, - struct btree_keys *b, size_t i) + struct btree *b, size_t i) { heap_sift(iter, i, extent_sort_cmp); } static inline void extent_sort_next(struct btree_node_iter *iter, - struct btree_keys *b, + struct btree *b, struct btree_node_iter_set *i) { sort_key_next(iter, b, i); @@ -730,7 +730,7 @@ static inline void extent_sort_next(struct btree_node_iter *iter, } static void extent_sort_append(struct cache_set *c, - struct btree_keys *b, + struct btree *b, struct btree_nr_keys *nr, struct bkey_packed *start, struct bkey_packed **prev, @@ -762,7 +762,7 @@ static void extent_sort_append(struct cache_set *c, struct btree_nr_keys bch_extent_sort_fix_overlapping(struct cache_set *c, struct bset *dst, - struct btree_keys *b, + struct btree *b, struct btree_node_iter *iter) { struct bkey_format *f = &b->format; @@ -1080,10 +1080,10 @@ static void extent_bset_insert(struct cache_set *c, struct btree_iter *iter, { struct btree *b = iter->nodes[0]; struct btree_node_iter *node_iter = &iter->node_iters[0]; - struct bset_tree *t = bset_tree_last(&b->keys); + struct bset_tree *t = bset_tree_last(b); struct bkey_packed *where = - bch_btree_node_iter_bset_pos(node_iter, &b->keys, t->data); - struct bkey_packed *prev = bkey_prev(&b->keys, t, where); + bch_btree_node_iter_bset_pos(node_iter, b, t->data); + struct bkey_packed *prev = bkey_prev(b, t, where); struct bkey_packed *next_live_key = where; unsigned clobber_u64s; @@ -1109,12 +1109,12 @@ static void extent_bset_insert(struct cache_set *c, struct btree_iter *iter, next_live_key, false)) goto drop_deleted_keys; - bch_bset_insert(&b->keys, node_iter, where, insert, clobber_u64s); + bch_bset_insert(b, node_iter, where, insert, clobber_u64s); bch_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, where->u64s); return; drop_deleted_keys: - bch_bset_delete(&b->keys, where, clobber_u64s); + bch_bset_delete(b, where, clobber_u64s); bch_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0); } @@ -1298,7 +1298,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, /* insert overlaps with start of k: */ bch_cut_subtract_front(iter, insert->k.p, k, &s->stats); BUG_ON(bkey_deleted(k.k)); - extent_save(&b->keys, node_iter, _k, k.k); + extent_save(b, node_iter, _k, k.k); break; case BCH_EXTENT_OVERLAP_BACK: @@ -1307,14 +1307,14 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, bkey_start_pos(&insert->k), k, &s->stats); BUG_ON(bkey_deleted(k.k)); - extent_save(&b->keys, node_iter, _k, k.k); + extent_save(b, node_iter, _k, k.k); /* * As the auxiliary tree is indexed by the end of the * key and we've just changed the end, update the * auxiliary tree. 
*/ - bch_bset_fix_invalidated_key(&b->keys, t, _k); + bch_bset_fix_invalidated_key(b, t, _k); bch_btree_node_iter_fix(iter, b, node_iter, t, _k, _k->u64s, _k->u64s); break; @@ -1324,12 +1324,12 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, /* The insert key completely covers k, invalidate k */ if (!bkey_whiteout(k.k)) - btree_keys_account_key_drop(&b->keys.nr, - t - b->keys.set, _k); + btree_keys_account_key_drop(&b->nr, + t - b->set, _k); bch_drop_subtract(iter, k, &s->stats); k.k->p = bkey_start_pos(&insert->k); - if (!__extent_save(&b->keys, node_iter, _k, k.k)) { + if (!__extent_save(b, node_iter, _k, k.k)) { /* * Couldn't repack: we aren't necessarily able * to repack if the new key is outside the range @@ -1337,7 +1337,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, * @insert: */ k.k->p = orig_pos; - extent_save(&b->keys, node_iter, _k, k.k); + extent_save(b, node_iter, _k, k.k); if (extent_insert_advance_pos(s, k.s_c) == BTREE_HOOK_RESTART_TRANS) @@ -1352,7 +1352,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, */ EBUG_ON(bkey_cmp(s->committed, k.k->p)); } else { - bch_bset_fix_invalidated_key(&b->keys, t, _k); + bch_bset_fix_invalidated_key(b, t, _k); bch_btree_node_iter_fix(iter, b, node_iter, t, _k, _k->u64s, _k->u64s); } @@ -1383,7 +1383,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, bch_cut_subtract_front(iter, insert->k.p, k, &s->stats); BUG_ON(bkey_deleted(k.k)); - extent_save(&b->keys, node_iter, _k, k.k); + extent_save(b, node_iter, _k, k.k); bch_add_sectors(iter, bkey_i_to_s_c(&split.k), bkey_start_offset(&split.k.k), @@ -1415,9 +1415,9 @@ bch_delete_fixup_extent(struct extent_insert_state *s) while (bkey_cmp(s->committed, insert->k.p) < 0 && (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK && - (_k = bch_btree_node_iter_peek_all(node_iter, &b->keys))) { - struct bset_tree *t = bch_bkey_to_bset(&b->keys, _k); - struct bkey_s k = __bkey_disassemble(&b->keys, _k, &unpacked); + (_k = bch_btree_node_iter_peek_all(node_iter, b))) { + struct bset_tree *t = bch_bkey_to_bset(b, _k); + struct bkey_s k = __bkey_disassemble(b, _k, &unpacked); enum bch_extent_overlap overlap; EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k))); @@ -1450,8 +1450,8 @@ bch_delete_fixup_extent(struct extent_insert_state *s) s->do_journal = true; if (overlap == BCH_EXTENT_OVERLAP_ALL) { - btree_keys_account_key_drop(&b->keys.nr, - t - b->keys.set, _k); + btree_keys_account_key_drop(&b->nr, + t - b->set, _k); bch_subtract_sectors(iter, k.s_c, bkey_start_offset(k.k), k.k->size, &s->stats); @@ -1589,9 +1589,9 @@ bch_insert_fixup_extent(struct btree_insert *trans, while (bkey_cmp(s.committed, insert->k->k.p) < 0 && (ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK && - (_k = bch_btree_node_iter_peek_all(node_iter, &b->keys))) { - struct bset_tree *t = bch_bkey_to_bset(&b->keys, _k); - struct bkey_s k = __bkey_disassemble(&b->keys, _k, &unpacked); + (_k = bch_btree_node_iter_peek_all(node_iter, b))) { + struct bset_tree *t = bch_bkey_to_bset(b, _k); + struct bkey_s k = __bkey_disassemble(b, _k, &unpacked); enum bch_extent_overlap overlap; EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k))); @@ -2215,7 +2215,7 @@ void bch_extent_pick_ptr_avoiding(struct cache_set *c, struct bkey_s_c k, } static enum merge_result bch_extent_merge(struct cache_set *c, - struct btree_keys *bk, + struct btree *bk, struct bkey_i *l, struct bkey_i *r) { struct bkey_s_extent el, er; @@ -2301,7 +2301,7 @@ static 
enum merge_result bch_extent_merge(struct cache_set *c, return BCH_MERGE_MERGE; } -static void extent_i_save(struct btree_keys *b, struct btree_node_iter *iter, +static void extent_i_save(struct btree *b, struct btree_node_iter *iter, struct bkey_packed *dst, struct bkey_i *src) { struct bkey_format *f = &b->format; @@ -2336,8 +2336,8 @@ static bool extent_merge_one_overlapping(struct btree_iter *iter, return !bkey_packed(k) || could_pack; } else { uk.p = new_pos; - extent_save(&b->keys, node_iter, k, &uk); - bch_bset_fix_invalidated_key(&b->keys, t, k); + extent_save(b, node_iter, k, &uk); + bch_bset_fix_invalidated_key(b, t, k); bch_btree_node_iter_fix(iter, b, node_iter, t, k, k->u64s, k->u64s); return true; @@ -2347,7 +2347,7 @@ static bool extent_merge_do_overlapping(struct btree_iter *iter, struct bkey *m, bool back_merge) { - struct btree_keys *b = &iter->nodes[0]->keys; + struct btree *b = iter->nodes[0]; struct btree_node_iter *node_iter = &iter->node_iters[0]; struct bset_tree *t; struct bkey_packed *k; @@ -2440,7 +2440,7 @@ static bool bch_extent_merge_inline(struct cache_set *c, struct bkey_packed *r, bool back_merge) { - struct btree_keys *b = &iter->nodes[0]->keys; + struct btree *b = iter->nodes[0]; struct btree_node_iter *node_iter = &iter->node_iters[0]; const struct bkey_format *f = &b->format; struct bset_tree *t = bset_tree_last(b); diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h index 869a16523b52..f219b2e46419 100644 --- a/drivers/md/bcache/extents.h +++ b/drivers/md/bcache/extents.h @@ -12,11 +12,11 @@ struct btree_insert; struct btree_insert_entry; struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *, - struct btree_keys *, + struct btree *, struct btree_node_iter *); struct btree_nr_keys bch_extent_sort_fix_overlapping(struct cache_set *c, struct bset *, - struct btree_keys *, + struct btree *, struct btree_node_iter *); extern const struct bkey_ops bch_bkey_btree_ops; diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 985be520df52..d5248e296126 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -479,7 +479,7 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf) rcu_read_lock(); for_each_cached_btree(b, c, tbl, iter, pos) { - bch_btree_keys_stats(&b->keys, &stats); + bch_btree_keys_stats(b, &stats); nodes++; } rcu_read_unlock(); @@ -525,7 +525,7 @@ lock_root: six_lock_read(&b->lock); } while (b != c->btree_roots[BTREE_ID_EXTENTS].b); - for_each_btree_node_key(&b->keys, k, &iter, btree_node_is_extents(b)) + for_each_btree_node_key(b, k, &iter, btree_node_is_extents(b)) bytes += bkey_bytes(k); six_unlock_read(&b->lock); @@ -540,7 +540,7 @@ static size_t bch_cache_size(struct cache_set *c) mutex_lock(&c->btree_cache_lock); list_for_each_entry(b, &c->btree_cache, list) - ret += 1 << (b->keys.page_order + PAGE_SHIFT); + ret += 1 << (b->page_order + PAGE_SHIFT); mutex_unlock(&c->btree_cache_lock); return ret;
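
The hunks above all follow one mechanical pattern, which a compact model may make easier to see. This is a toy sketch, not part of the patch, and the names are simplified stand-ins for the kernel types: before this commit, struct btree embedded a struct btree_keys and the bset/bkey helpers took &b->keys; the commit folds those fields (nsets, page_order, nr, format, set[], and friends) directly into struct btree, so every helper takes the btree node itself.

#include <assert.h>
#include <stdio.h>

#define MAX_BSETS 3U

/* Toy stand-ins; the real structs carry far more state. */
struct bset_tree {
	unsigned short size;
	unsigned short extra;
};

struct btree {
	/* After the patch: former btree_keys fields are direct members. */
	unsigned char nsets;
	unsigned char page_order;
	struct bset_tree set[MAX_BSETS];
};

/* Was bset_tree_last(&b->keys); now the node is passed directly: */
static struct bset_tree *bset_tree_last(struct btree *b)
{
	assert(b->nsets);
	return b->set + b->nsets - 1;
}

int main(void)
{
	struct btree b = { .nsets = 2 };

	printf("last bset is set[%td]\n", bset_tree_last(&b) - b.set);
	return 0;
}

Built standalone this prints "last bset is set[1]"; the point is only that call sites shrink from bset_tree_last(&b->keys) to bset_tree_last(b), which is the substitution repeated throughout the diff.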
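
The bset_tree comment added to btree_types.h describes a binary tree laid out "in an array as if the array started at 1". A minimal sketch of that indexing rule, assuming nothing beyond the comment itself (the real tree stores its lookup entries behind aux_data_offset, per bset.c):

#include <stdio.h>

int main(void)
{
	unsigned size = 16;	/* nodes occupy indices 1..size-1 */
	unsigned j;

	/*
	 * With the root at index 1, node j has parent j/2 and children
	 * 2j and 2j+1, so whole subtrees stay contiguous and pack
	 * neatly into cachelines.
	 */
	for (j = 1; j < size; j++) {
		printf("node %2u: parent %2u", j, j / 2);
		if (2 * j < size)
			printf(", children %2u and %2u", 2 * j, 2 * j + 1);
		printf("\n");
	}
	return 0;
}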
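
Similarly, the check in the btree_io.h hunk (bch_maybe_compact_whiteouts) compares live keys against three quarters of a bset. A sketch of just that predicate, with the threshold taken from the hunk and the helper name invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* live_u64s * 4 < bset_u64s * 3 is live/total < 3/4 without dividing. */
static bool bset_worth_compacting(unsigned live_u64s, unsigned bset_u64s)
{
	return live_u64s * 4 < bset_u64s * 3;
}

int main(void)
{
	printf("%d\n", bset_worth_compacting(74, 100));	/* 1: 74% live */
	printf("%d\n", bset_worth_compacting(75, 100));	/* 0: exactly 75% */
	return 0;
}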