author | Kent Overstreet <kent.overstreet@gmail.com> | 2016-11-17 08:45:59 -0900
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2016-11-23 14:09:23 -0900
commit | 1f2be1b343cfee1874d4efb2814a99690167c1cb (patch)
tree | 4666cb16ee823ec769fd411f05604f5242d3d881
parent | b6326f5ab4dd15c4dba7b265f1f3412929b1643d (diff)
bcache: make b->nsets normal
-rw-r--r-- | drivers/md/bcache/bset.c | 60
-rw-r--r-- | drivers/md/bcache/bset.h | 6
-rw-r--r-- | drivers/md/bcache/btree_cache.c | 8
-rw-r--r-- | drivers/md/bcache/btree_gc.c | 2
-rw-r--r-- | drivers/md/bcache/btree_io.c | 16
-rw-r--r-- | drivers/md/bcache/btree_iter.c | 2
-rw-r--r-- | drivers/md/bcache/btree_update.c | 8
-rw-r--r-- | drivers/md/bcache/extents.c | 5
8 files changed, 57 insertions, 50 deletions
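A note on what "normal" means here, before the diff itself: `b->nsets` used to hold the *index of the last valid bset*, so a node with one bset had `nsets == 0` and every walk ran to an inclusive bound (`t <= b->set + b->nsets`). After this patch it holds the *count* of valid bsets, and iteration goes through the new `for_each_bset()` helper. The sketch below is illustrative only: the structures are cut down to stubs and `count_bsets()` is a hypothetical helper, not part of the patch; only the `for_each_bset()` macro is taken verbatim from the bset.h hunk below.

```c
/*
 * Illustrative sketch only: bcache structures reduced to stubs.
 * count_bsets() is hypothetical; for_each_bset() is from the patch.
 */
#define MAX_BSETS	4U

struct bset;				/* sorted block of keys; layout elided */

struct bset_tree {
	struct bset	*data;		/* aux search tree state elided */
};

struct btree_keys {
	unsigned	 nsets;		/* after this patch: count of valid bsets */
	struct bset_tree set[MAX_BSETS];
};

/* The helper this patch adds to bset.h: a plain half-open loop */
#define for_each_bset(_b, _t)						\
	for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)

/* With a one-based count, "how many bsets" needs no +1 anywhere */
static unsigned count_bsets(struct btree_keys *b)
{
	struct bset_tree *t;
	unsigned n = 0;

	for_each_bset(b, t)
		n++;
	return n;			/* always equals b->nsets */
}
```

The same shift is what turns the append path in bset.c into `t = &b->set[b->nsets++]` guarded by `BUG_ON(b->nsets >= MAX_BSETS)`, in place of the pre-increment `t = &b->set[++b->nsets]` and the easy-to-fumble `b->nsets + 1 == MAX_BSETS` check.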
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 04a940487077..8b828b7ff145 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -24,7 +24,7 @@ struct bset_tree *bch_bkey_to_bset(struct btree_keys *b, struct bkey_packed *k)
 {
 	struct bset_tree *t;
 
-	for (t = b->set; t <= b->set + b->nsets; t++)
+	for_each_bset(b, t)
 		if (k >= t->data->start &&
 		    k < bset_bkey_last(t->data))
 			return t;
@@ -92,11 +92,11 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
 
 void bch_dump_btree_node(struct btree_keys *b)
 {
-	unsigned i;
+	struct bset_tree *t;
 
 	console_lock();
 
-	for (i = 0; i <= b->nsets; i++)
-		bch_dump_bset(b, b->set[i].data, i);
+	for_each_bset(b, t)
+		bch_dump_bset(b, t->data, t - b->set);
 
 	console_unlock();
 }
@@ -105,7 +105,7 @@ void bch_dump_btree_node_iter(struct btree_keys *b,
 {
 	struct btree_node_iter_set *set;
 
-	printk(KERN_ERR "btree node iter with %u sets:\n", b->nsets + 1);
+	printk(KERN_ERR "btree node iter with %u sets:\n", b->nsets);
 
 	btree_node_iter_for_each(iter, set) {
 		struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
@@ -137,16 +137,16 @@ static bool keys_out_of_order(const struct bkey_format *f,
 
 void __bch_verify_btree_nr_keys(struct btree_keys *b)
 {
-	unsigned i;
+	struct bset_tree *t;
 	struct bkey_packed *k;
 	struct btree_nr_keys nr = { 0 };
 
-	for (i = 0; i <= b->nsets; i++)
-		for (k = b->set[i].data->start;
-		     k != bset_bkey_last(b->set[i].data);
+	for_each_bset(b, t)
+		for (k = t->data->start;
+		     k != bset_bkey_last(t->data);
 		     k = bkey_next(k))
 			if (!bkey_packed_is_whiteout(b, k))
-				btree_keys_account_key_add(&nr, i, k);
+				btree_keys_account_key_add(&nr, t - b->set, k);
 
 	BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
 }
@@ -198,7 +198,7 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter,
 
 	first = __btree_node_offset_to_key(b, iter->data[0].k);
 
-	for (t = b->set; t <= b->set + b->nsets; t++)
+	for_each_bset(b, t)
 		if (bch_btree_node_iter_bset_pos(iter, b, t->data) ==
 		    bset_bkey_last(t->data) &&
 		    (k = bkey_prev_all(t, bset_bkey_last(t->data))))
@@ -231,7 +231,7 @@ void bch_verify_key_order(struct btree_keys *b,
 	BUG_ON(k != bset_bkey_last(t->data) &&
 	       keys_out_of_order(f, where, k, iter->is_extents));
 
-	for (t = b->set; t <= b->set + b->nsets; t++) {
+	for_each_bset(b, t) {
 		if (!t->data->u64s)
 			continue;
 
@@ -799,10 +799,11 @@ static void bch_bset_build_rw_aux_tree(struct btree_keys *b, struct bset_tree *t
 
 void bch_bset_init_first(struct btree_keys *b, struct bset *i)
 {
-	struct bset_tree *t = &b->set[0];
+	struct bset_tree *t;
 
 	BUG_ON(b->nsets);
 
+	t = &b->set[b->nsets++];
 	t->data = i;
 	memset(i, 0, sizeof(*i));
 	get_random_bytes(&i->seq, sizeof(i->seq));
@@ -814,9 +815,10 @@ void bch_bset_init_first(struct btree_keys *b, struct bset *i)
 void bch_bset_init_next(struct btree_keys *b, struct bset *i)
 {
 	struct bset_tree *t;
 
-	BUG_ON(b->nsets + 1 == MAX_BSETS);
-	t = &b->set[++b->nsets];
+	BUG_ON(b->nsets >= MAX_BSETS);
+
+	t = &b->set[b->nsets++];
 	t->data = i;
 	memset(i, 0, sizeof(*i));
 	i->seq = b->set->data->seq;
@@ -1111,7 +1113,7 @@ void bch_bset_insert(struct btree_keys *b,
 		src = &packed;
 
 	if (!bkey_is_whiteout(&insert->k))
-		btree_keys_account_key_add(&b->nr, b->nsets, src);
+		btree_keys_account_key_add(&b->nr, t - b->set, src);
 
 	if (src->u64s != clobber_u64s) {
 		u64 *src_p = where->_data + clobber_u64s;
@@ -1401,7 +1403,7 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter,
 	struct bset_tree *t;
 	struct bkey_packed p, *packed_search, *lossy_packed_search;
 
-	BUG_ON(b->nsets + 1 > MAX_BSETS);
+	BUG_ON(b->nsets > MAX_BSETS);
 
 	switch (bkey_pack_pos_lossy(&p, search, &b->format)) {
 	case BKEY_PACK_POS_EXACT:
@@ -1424,7 +1426,7 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter,
 
 	__bch_btree_node_iter_init(iter, is_extents);
 
-	for (t = b->set; t <= b->set + b->nsets; t++)
+	for_each_bset(b, t)
 		__bch_btree_node_iter_push(iter, b,
 					   bch_bset_search(b, t, search,
 							   packed_search,
@@ -1443,7 +1445,7 @@ void bch_btree_node_iter_init_from_start(struct btree_node_iter *iter,
 
 	__bch_btree_node_iter_init(iter, is_extents);
 
-	for (t = b->set; t <= b->set + b->nsets; t++)
+	for_each_bset(b, t)
 		__bch_btree_node_iter_push(iter, b,
 					   t->data->start,
 					   bset_bkey_last(t->data));
@@ -1547,7 +1549,7 @@ struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *iter,
 
 	bch_btree_node_iter_verify(iter, b);
 
-	for (t = b->set; t <= b->set + b->nsets; t++) {
+	for_each_bset(b, t) {
 		k = bkey_prev_all(t,
 			bch_btree_node_iter_bset_pos(iter, b, t->data));
 		if (k &&
@@ -1745,13 +1747,12 @@ bool bch_maybe_compact_deleted_keys(struct btree_keys *b)
 {
 	struct bset_tree *t, *rebuild_from = NULL;
 	bool last_set_aux_tree_ro = bset_has_ro_aux_tree(bset_tree_last(b));
-	unsigned idx;
 
-	for (idx = 0; idx <= b->nsets; idx++) {
-		struct bset *i = b->set[idx].data;
+	for_each_bset(b, t) {
+		struct bset *i = t->data;
 		struct bkey_packed *k, *n, *out = i->start;
 
-		if (b->nr.bset_u64s[idx] * 4 > le16_to_cpu(i->u64s) * 3)
+		if (b->nr.bset_u64s[t - b->set] * 4 > le16_to_cpu(i->u64s) * 3)
 			continue;
 
 		/*
@@ -1760,7 +1761,7 @@ bool bch_maybe_compact_deleted_keys(struct btree_keys *b)
 		 *
 		 * XXX unless they're extents, if we fix assertions elsewhere
 		 */
-		if (idx == b->nsets && !last_set_aux_tree_ro)
+		if (t == bset_tree_last(b) && !last_set_aux_tree_ro)
 			break;
 
 		for (k = i->start; k != bset_bkey_last(i); k = n) {
@@ -1775,13 +1776,13 @@ bool bch_maybe_compact_deleted_keys(struct btree_keys *b)
 		i->u64s = cpu_to_le16((u64 *) out - i->_data);
 
 		if (!rebuild_from)
-			rebuild_from = &b->set[idx];
+			rebuild_from = t;
 	}
 
 	if (!rebuild_from)
		return false;
 
-	for (t = rebuild_from; t <= b->set + b->nsets; t++) {
+	for (t = rebuild_from; t < b->set + b->nsets; t++) {
 		if (t == bset_tree_last(b) && !last_set_aux_tree_ro)
 			bch_bset_build_rw_aux_tree(b, t);
 		else
@@ -1793,10 +1794,9 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
 {
-	unsigned i;
+	struct bset_tree *t;
 
-	for (i = 0; i <= b->nsets; i++) {
-		struct bset_tree *t = &b->set[i];
+	for_each_bset(b, t) {
 		enum bset_aux_tree_type type = bset_aux_tree_type(t);
 		size_t j;
 
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 48bd4c84ef3a..d3891db681fc 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -243,6 +243,9 @@ struct btree_keys {
 #endif
 };
 
+#define for_each_bset(_b, _t)						\
+	for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
+
 extern bool bch_expensive_debug_checks;
 
 static inline bool btree_keys_expensive_checks(struct btree_keys *b)
@@ -256,7 +259,8 @@ static inline bool btree_keys_expensive_checks(struct btree_keys *b)
 
 static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
 {
-	return b->set + b->nsets;
+	EBUG_ON(!b->nsets);
+	return b->set + b->nsets - 1;
 }
 
 static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
diff --git a/drivers/md/bcache/btree_cache.c b/drivers/md/bcache/btree_cache.c
index 06a9921afbcd..52cd2105add4 100644
--- a/drivers/md/bcache/btree_cache.c
+++ b/drivers/md/bcache/btree_cache.c
@@ -607,8 +607,8 @@ struct btree *bch_btree_node_get(struct btree_iter *iter,
 				 const struct bkey_i *k, unsigned level,
 				 enum six_lock_type lock_type)
 {
-	int i = 0;
 	struct btree *b;
+	struct bset_tree *t;
 
 	BUG_ON(level >= BTREE_MAX_DEPTH);
 retry:
@@ -676,9 +676,9 @@ retry:
 		}
 	}
 
-	for (; i <= b->keys.nsets; i++) {
-		prefetch(b->keys.set[i].tree);
-		prefetch(b->keys.set[i].data);
+	for_each_bset(&b->keys, t) {
+		prefetch(t->tree);
+		prefetch(t->data);
 	}
 
 	/* avoid atomic set bit if it's not needed: */
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index f59563f5b2ef..8753067d684b 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -438,7 +438,7 @@ static void recalc_packed_keys(struct btree *b)
 
 	memset(&b->keys.nr, 0, sizeof(b->keys.nr));
 
-	BUG_ON(b->keys.nsets);
+	BUG_ON(b->keys.nsets != 1);
 
 	for (k = b->keys.set[0].data->start;
 	     k != bset_bkey_last(b->keys.set[0].data);
diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c
index c82cf93a01f8..26434bcca645 100644
--- a/drivers/md/bcache/btree_io.c
+++ b/drivers/md/bcache/btree_io.c
@@ -36,7 +36,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
 	__bch_btree_node_iter_init(sort_iter, btree_node_is_extents(b));
 
 	for (t = b->keys.set + from;
-	     t <= b->keys.set + b->keys.nsets;
+	     t < b->keys.set + b->keys.nsets;
 	     t++)
 		bch_btree_node_iter_push(sort_iter, &b->keys,
 					 t->data->start,
@@ -129,7 +129,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
 		}
 	}
 
-	b->keys.nsets = from;
+	b->keys.nsets = from + 1;
 	bch_bset_build_ro_aux_tree(&b->keys, &b->keys.set[from]);
 
 	if (!is_write_locked)
@@ -157,14 +157,14 @@ static bool btree_node_compact(struct cache_set *c, struct btree *b,
 	int i = 0;
 
 	/* Don't sort if nothing to do */
-	if (!b->keys.nsets)
+	if (b->keys.nsets == 1)
 		goto nosort;
 
 	/* If not a leaf node, always sort */
 	if (b->level)
 		goto sort;
 
-	for (i = b->keys.nsets - 1; i >= 0; --i) {
+	for (i = b->keys.nsets - 2; i >= 0; --i) {
 		crit *= crit_factor;
 
 		if (le16_to_cpu(b->keys.set[i].data->u64s) < crit)
@@ -172,7 +172,7 @@ static bool btree_node_compact(struct cache_set *c, struct btree *b,
 	}
 
 	/* Sort if we'd overflow */
-	if (b->keys.nsets + 1 == MAX_BSETS) {
+	if (b->keys.nsets == MAX_BSETS) {
 		i = 0;
 		goto sort;
 	}
@@ -206,7 +206,7 @@ void bch_btree_init_next(struct cache_set *c, struct btree *b,
 	did_sort = btree_node_compact(c, b, iter);
 
 	/* do verify if we sorted down to a single set: */
-	if (did_sort && !b->keys.nsets)
+	if (did_sort && b->keys.nsets == 1)
 		bch_btree_verify(c, b);
 
 	if (b->written < c->sb.btree_node_size) {
@@ -866,7 +866,7 @@ void bch_btree_node_flush_journal_entries(struct cache_set *c,
 					   struct btree *b,
 					   struct closure *cl)
 {
-	int i;
+	int i = b->keys.nsets;
 
 	/*
 	 * Journal sequence numbers in the different bsets will always be in
@@ -874,7 +874,7 @@
 	 * most recent bset might not have a journal sequence number yet, so we
 	 * need to loop:
 	 */
-	for (i = b->keys.nsets; i >= 0; --i) {
+	while (i--) {
 		u64 seq = le64_to_cpu(b->keys.set[i].data->journal_seq);
 
 		if (seq) {
diff --git a/drivers/md/bcache/btree_iter.c b/drivers/md/bcache/btree_iter.c
index 315beca5f51d..d9d73ff2b976 100644
--- a/drivers/md/bcache/btree_iter.c
+++ b/drivers/md/bcache/btree_iter.c
@@ -410,7 +410,7 @@ found:
 		struct bset_tree *t;
 		struct bkey_packed *k;
 
-		for (t = b->keys.set; t <= b->keys.set + b->keys.nsets; t++) {
+		for_each_bset(&b->keys, t) {
 			if (bch_bkey_to_bset(&b->keys, where) == t)
 				continue;
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index 9e71e6ed3139..98e701a464c4 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -30,7 +30,7 @@ void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b)
 	struct bset_tree *t;
 	struct bkey uk;
 
-	for (t = b->keys.set; t <= b->keys.set + b->keys.nsets; t++)
+	for_each_bset(&b->keys, t)
 		for (k = t->data->start;
 		     k != bset_bkey_last(t->data);
 		     k = bkey_next(k))
@@ -314,7 +314,7 @@ static void bch_btree_sort_into(struct cache_set *c,
 	struct btree_node_iter iter;
 	u64 start_time = local_clock();
 
-	BUG_ON(dst->keys.nsets);
+	BUG_ON(dst->keys.nsets != 1);
 
 	dst->keys.set[0].extra = BSET_NO_AUX_TREE_VAL;
 
@@ -1042,7 +1042,7 @@ void bch_btree_interior_update_will_free_node(struct cache_set *c,
 	 * over the bset->journal_seq tracking, since we'll be mixing those keys
 	 * in with keys that aren't in the journal anymore:
 	 */
-	for (t = b->keys.set; t <= b->keys.set + b->keys.nsets; t++)
+	for_each_bset(&b->keys, t)
 		as->journal_seq = max(as->journal_seq, t->data->journal_seq);
 
 	/*
@@ -1338,7 +1338,7 @@ static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b,
 		} else
 			p = bkey_next(p);
 
-	BUG_ON(b->keys.nsets ||
+	BUG_ON(b->keys.nsets != 1 ||
 	       b->keys.nr.live_u64s != le16_to_cpu(b->keys.set->data->u64s));
 
 	btree_node_interior_verify(b);
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 4be04a208a8c..c192a389dc87 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -2247,7 +2247,10 @@ static bool extent_merge_do_overlapping(struct btree_iter *iter,
 	 * But in the other bsets, we have to check for and fix such extents:
 	 */
 do_fixup:
-	for (t = b->set; t < b->set + b->nsets; t++) {
+	for_each_bset(b, t) {
+		if (t == bset_tree_last(b))
+			break;
+
 		if (!t->data->u64s)
 			continue;
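One consequence worth noting from the bset.h hunk: bset_tree_last() used to be well defined even on a node with no initialized bsets (it returned b->set + 0); it now asserts EBUG_ON(!b->nsets) and returns b->set + b->nsets - 1. That is also why the assertions in recalc_packed_keys(), bch_btree_sort_into(), and btree_split_insert_keys() change from BUG_ON(nsets) to BUG_ON(nsets != 1): under the old convention "exactly one bset" was nsets == 0, under the new one it is nsets == 1.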