diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2016-11-17 10:05:50 -0900 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2016-11-23 14:09:23 -0900 |
commit | 49b02a1ff0c18c0a2facc7d981bde42efead0fed (patch) | |
tree | b7495c147487cbbfc878eafc98ac215ec4f96986 | |
parent | 1bed270cbfa27989e7652d2595e76bc5f959f229 (diff) |
bcache: More sort refactoring
reduce b->c usage
-rw-r--r-- | drivers/md/bcache/bcache.h | 2 | ||||
-rw-r--r-- | drivers/md/bcache/bkey_methods.h | 6 | ||||
-rw-r--r-- | drivers/md/bcache/bset.c | 135 | ||||
-rw-r--r-- | drivers/md/bcache/bset.h | 10 | ||||
-rw-r--r-- | drivers/md/bcache/btree_io.c | 329 | ||||
-rw-r--r-- | drivers/md/bcache/btree_io.h | 2 | ||||
-rw-r--r-- | drivers/md/bcache/btree_update.c | 31 | ||||
-rw-r--r-- | drivers/md/bcache/extents.c | 62 | ||||
-rw-r--r-- | drivers/md/bcache/extents.h | 3 | ||||
-rw-r--r-- | drivers/md/bcache/super.c | 4 |
10 files changed, 301 insertions(+), 283 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index ae2efdeedd24..f6b6c4e68192 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -755,7 +755,7 @@ struct cache_set { */ mempool_t fill_iter; - mempool_t btree_sort_pool; + mempool_t btree_bounce_pool; struct journal journal; diff --git a/drivers/md/bcache/bkey_methods.h b/drivers/md/bcache/bkey_methods.h index f8c7f42aba7e..19746aec3fb3 100644 --- a/drivers/md/bcache/bkey_methods.h +++ b/drivers/md/bcache/bkey_methods.h @@ -41,8 +41,10 @@ enum merge_result { BCH_MERGE_MERGE, }; -typedef bool (*key_filter_fn)(struct btree_keys *, struct bkey_s); -typedef enum merge_result (*key_merge_fn)(struct btree_keys *, +typedef bool (*key_filter_fn)(struct cache_set *, struct btree_keys *, + struct bkey_s); +typedef enum merge_result (*key_merge_fn)(struct cache_set *, + struct btree_keys *, struct bkey_i *, struct bkey_i *); struct bkey_ops { diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 1006a8d63cd3..0b581b9203f8 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -1612,141 +1612,6 @@ struct bkey_s_c bch_btree_node_iter_peek_unpack(struct btree_node_iter *iter, } EXPORT_SYMBOL(bch_btree_node_iter_peek_unpack); -/* Mergesort */ - -/* No repacking: */ -static struct btree_nr_keys btree_mergesort_simple(struct bset *dst, - struct btree_keys *src, - struct btree_node_iter *src_iter) -{ - struct bkey_packed *in, *out = bset_bkey_last(dst); - struct btree_nr_keys nr; - - memset(&nr, 0, sizeof(nr)); - - while ((in = bch_btree_node_iter_next_all(src_iter, src))) { - if (!bkey_packed_is_whiteout(src, in)) { - bkey_copy(out, in); - btree_keys_account_key_add(&nr, 0, out); - out = bkey_next(out); - } - } - - dst->u64s = cpu_to_le16((u64 *) out - dst->_data); - return nr; -} - -/* Sort + repack in a new format: */ -static struct btree_nr_keys btree_mergesort(struct bset *dst, - struct btree_keys *src, - struct btree_node_iter *src_iter, - struct 
bkey_format *in_f, - struct bkey_format *out_f, - key_filter_fn filter) -{ - struct bkey_packed *in, *out = bset_bkey_last(dst); - struct btree_nr_keys nr; - - memset(&nr, 0, sizeof(nr)); - - while ((in = bch_btree_node_iter_next_all(src_iter, src))) { - if (bkey_packed_is_whiteout(src, in)) - continue; - - if (bch_bkey_transform(out_f, out, bkey_packed(in) - ? in_f : &bch_bkey_format_current, in)) - out->format = KEY_FORMAT_LOCAL_BTREE; - else - bkey_unpack((void *) out, in_f, in); - - btree_keys_account_key_add(&nr, 0, out); - out = bkey_next(out); - } - - dst->u64s = cpu_to_le16((u64 *) out - dst->_data); - return nr; -} - -/* Sort, repack, and merge: */ -static struct btree_nr_keys btree_mergesort_extents(struct bset *dst, - struct btree_keys *src, - struct btree_node_iter *iter, - struct bkey_format *in_f, - struct bkey_format *out_f, - key_filter_fn filter, - key_merge_fn merge) -{ - struct bkey_packed *k, *prev = NULL, *out; - struct btree_nr_keys nr; - BKEY_PADDED(k) tmp; - - memset(&nr, 0, sizeof(nr)); - - while ((k = bch_btree_node_iter_next_all(iter, src))) { - if (bkey_packed_is_whiteout(src, k)) - continue; - - /* - * The filter might modify pointers, so we have to unpack the - * key and values to &tmp.k: - */ - bkey_unpack(&tmp.k, in_f, k); - - if (filter && filter(src, bkey_i_to_s(&tmp.k))) - continue; - - /* prev is always unpacked, for key merging: */ - - if (prev && - merge && - merge(src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE) - continue; - - /* - * the current key becomes the new prev: advance prev, then - * copy the current key - but first pack prev (in place): - */ - if (prev) { - bkey_pack(prev, (void *) prev, out_f); - - btree_keys_account_key_add(&nr, 0, prev); - prev = bkey_next(prev); - } else { - prev = bset_bkey_last(dst); - } - - bkey_copy(prev, &tmp.k); - } - - if (prev) { - bkey_pack(prev, (void *) prev, out_f); - btree_keys_account_key_add(&nr, 0, prev); - out = bkey_next(prev); - } else { - out = bset_bkey_last(dst); - } - - 
dst->u64s = cpu_to_le16((u64 *) out - dst->_data); - return nr; -} - -struct btree_nr_keys bch_sort_bsets(struct bset *dst, - struct btree_keys *src, - struct btree_node_iter *src_iter, - struct bkey_format *in_f, - struct bkey_format *out_f, - key_filter_fn filter, - key_merge_fn merge) -{ - if (merge) - return btree_mergesort_extents(dst, src, src_iter, - in_f, out_f, filter, merge); - else if (memcmp(in_f, out_f, sizeof(*in_f))) - return btree_mergesort(dst, src, src_iter, in_f, out_f, filter); - else - return btree_mergesort_simple(dst, src, src_iter); -} - bool bch_maybe_compact_deleted_keys(struct btree_keys *b) { struct bset_tree *t, *rebuild_from = NULL; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index d268e58d6d89..aba5ae3f99ed 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -597,16 +597,6 @@ static inline void btree_keys_account_key(struct btree_nr_keys *n, #define btree_keys_account_key_drop(_nr, _bset_idx, _k) \ btree_keys_account_key(_nr, _bset_idx, _k, -1) -/* Sorting */ - -struct btree_nr_keys bch_sort_bsets(struct bset *, - struct btree_keys *, - struct btree_node_iter *, - struct bkey_format *, - struct bkey_format *, - key_filter_fn, - key_merge_fn); - bool bch_maybe_compact_deleted_keys(struct btree_keys *); struct bset_stats { diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c index 19ed5f085ab4..53993699874a 100644 --- a/drivers/md/bcache/btree_io.c +++ b/drivers/md/bcache/btree_io.c @@ -16,32 +16,79 @@ #include <trace/events/bcache.h> +static void btree_bounce_free(struct cache_set *c, unsigned order, + bool used_mempool, void *p) +{ + if (used_mempool) + mempool_free(virt_to_page(p), &c->btree_bounce_pool); + else + free_pages((unsigned long) p, order); +} + +static void *btree_bounce_alloc(struct cache_set *c, unsigned order, + bool *used_mempool) +{ + void *p; + + BUG_ON(1 << order > btree_pages(c)); + + *used_mempool = false; + p = (void *) 
__get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order); + if (p) + return p; + + *used_mempool = true; + return page_address(mempool_alloc(&c->btree_bounce_pool, GFP_NOIO)); +} + +static unsigned sort_keys(struct bkey_packed *dst, + struct btree_keys *b, + struct btree_node_iter *iter, + bool filter_whiteouts, + bool filter_deleted, + bool filter_dups) +{ + struct bkey_packed *in, *next, *out = dst; + + while ((in = bch_btree_node_iter_next_all(iter, b))) { + if (filter_dups && + (next = bch_btree_node_iter_peek_all(iter, b)) && + !bkey_cmp_packed(&b->format, in, next)) + continue; + + if (filter_whiteouts && bkey_packed_is_whiteout(b, in)) + continue; + + if (filter_deleted && bkey_deleted(in)) + continue; + + bkey_copy(out, in); + out = bkey_next(out); + } + + return (u64 *) out - (u64 *) dst; +} + static void btree_node_sort(struct cache_set *c, struct btree *b, struct btree_iter *iter, unsigned from, - struct btree_node_iter *sort_iter, - sort_fix_overlapping_fn sort_fn, bool is_write_locked) { struct btree_node *out; - struct btree_nr_keys nr; - struct btree_node_iter _sort_iter; + struct btree_node_iter sort_iter; + struct bset_tree *t; bool used_mempool = false; u64 start_time; - unsigned order; + unsigned i, u64s, order, shift = b->keys.nsets - from - 1; + bool sorting_entire_node = from == 0; - if (!sort_iter) { - struct bset_tree *t; + __bch_btree_node_iter_init(&sort_iter, btree_node_is_extents(b)); - sort_iter = &_sort_iter; - __bch_btree_node_iter_init(sort_iter, btree_node_is_extents(b)); - - for (t = b->keys.set + from; - t < b->keys.set + b->keys.nsets; - t++) - bch_btree_node_iter_push(sort_iter, &b->keys, - t->data->start, - bset_bkey_last(t->data)); - } + for (t = b->keys.set + from; + t < b->keys.set + b->keys.nsets; + t++) + bch_btree_node_iter_push(&sort_iter, &b->keys, + t->data->start, + bset_bkey_last(t->data)); if (!from) { order = b->keys.page_order; @@ -49,46 +96,23 @@ static void btree_node_sort(struct cache_set *c, struct btree *b, struct 
btree_node_iter_set *set; unsigned u64s = 0; - btree_node_iter_for_each(sort_iter, set) + btree_node_iter_for_each(&sort_iter, set) u64s += set->end - set->k; order = get_order(__set_bytes(b->data, u64s)); } - out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order); - if (!out) { - struct page *outp; - - outp = mempool_alloc(&c->btree_sort_pool, GFP_NOIO); - out = page_address(outp); - used_mempool = true; - } + out = btree_bounce_alloc(c, order, &used_mempool); start_time = local_clock(); - out->keys.u64s = 0; + u64s = sort_keys(out->keys.start, + &b->keys, &sort_iter, + bset_written(b, b->keys.set[from].data), + btree_node_is_extents(b), + !btree_node_is_extents(b)); - /* - * The nr_keys accounting for number of packed/unpacked keys isn't - * broken out by bset, which means we can't merge extents unless we're - * sorting the entire node (we'd have to recalculate nr_keys for the - * entire node). Also, extent merging is problematic if we're not - * sorting the entire node, since we'd end up with extents overlapping - * with 0 length whiteouts in other bsets we didn't sort. 
- */ - if (sort_fn) - nr = sort_fn(&out->keys, &b->keys, sort_iter); - else if (!from) - nr = bch_sort_bsets(&out->keys, &b->keys, sort_iter, - &b->keys.format, - &b->keys.format, - NULL, - btree_node_ops(b)->key_merge); - else - nr = bch_sort_bsets(&out->keys, &b->keys, sort_iter, - &b->keys.format, - &b->keys.format, - NULL, NULL); + out->keys.u64s = cpu_to_le16(u64s); BUG_ON((void *) bset_bkey_last(&out->keys) > (void *) out + (PAGE_SIZE << order)); @@ -99,7 +123,15 @@ static void btree_node_sort(struct cache_set *c, struct btree *b, if (!is_write_locked) __btree_node_lock_write(b, iter); - if (!from) { + /* Make sure we preserve bset journal_seq: */ + for (t = b->keys.set + from + 1; + t < b->keys.set + b->keys.nsets; + t++) + b->keys.set[from].data->journal_seq = + max(b->keys.set[from].data->journal_seq, + t->data->journal_seq); + + if (sorting_entire_node) { unsigned u64s = le16_to_cpu(out->keys.u64s); BUG_ON(order != b->keys.page_order); @@ -113,36 +145,174 @@ static void btree_node_sort(struct cache_set *c, struct btree *b, out->keys.u64s = cpu_to_le16(u64s); swap(out, b->data); b->keys.set->data = &b->data->keys; - b->keys.nr = nr; } else { - unsigned i; - b->keys.set[from].data->u64s = out->keys.u64s; - memcpy(b->keys.set[from].data->start, out->keys.start, - (void *) bset_bkey_last(&out->keys) - - (void *) out->keys.start); - - for (i = from + 1; i < MAX_BSETS; i++) { - b->keys.nr.bset_u64s[from] += - b->keys.nr.bset_u64s[i]; - b->keys.nr.bset_u64s[i] = 0; - } + memcpy_u64s(b->keys.set[from].data->start, + out->keys.start, + le16_to_cpu(out->keys.u64s)); + } + + for (i = from + 1; i < b->keys.nsets; i++) + b->keys.nr.bset_u64s[from] += + b->keys.nr.bset_u64s[i]; + + b->keys.nsets -= shift; + + for (i = from + 1; i < b->keys.nsets; i++) { + b->keys.nr.bset_u64s[i] = b->keys.nr.bset_u64s[i + shift]; + b->keys.set[i] = b->keys.set[i + shift]; } - b->keys.nsets = from + 1; + for (i = b->keys.nsets; i < MAX_BSETS; i++) + b->keys.nr.bset_u64s[i] = 0; + 
bch_bset_set_no_aux_tree(&b->keys, &b->keys.set[from]); if (!is_write_locked) __btree_node_unlock_write(b, iter); - if (used_mempool) - mempool_free(virt_to_page(out), &c->btree_sort_pool); - else - free_pages((unsigned long) out, order); + btree_bounce_free(c, order, used_mempool, out); bch_verify_btree_nr_keys(&b->keys); } +/* Sort + repack in a new format: */ +static struct btree_nr_keys sort_repack(struct bset *dst, + struct btree_keys *src, + struct btree_node_iter *src_iter, + struct bkey_format *in_f, + struct bkey_format *out_f, + bool filter_whiteouts) +{ + struct bkey_packed *in, *out = bset_bkey_last(dst); + struct btree_nr_keys nr; + + memset(&nr, 0, sizeof(nr)); + + while ((in = bch_btree_node_iter_next_all(src_iter, src))) { + if (filter_whiteouts && bkey_packed_is_whiteout(src, in)) + continue; + + if (bch_bkey_transform(out_f, out, bkey_packed(in) + ? in_f : &bch_bkey_format_current, in)) + out->format = KEY_FORMAT_LOCAL_BTREE; + else + bkey_unpack((void *) out, in_f, in); + + btree_keys_account_key_add(&nr, 0, out); + out = bkey_next(out); + } + + dst->u64s = cpu_to_le16((u64 *) out - dst->_data); + return nr; +} + +/* Sort, repack, and merge: */ +static struct btree_nr_keys sort_repack_merge(struct cache_set *c, + struct bset *dst, + struct btree_keys *src, + struct btree_node_iter *iter, + struct bkey_format *in_f, + struct bkey_format *out_f, + bool filter_whiteouts, + key_filter_fn filter, + key_merge_fn merge) +{ + struct bkey_packed *k, *prev = NULL, *out; + struct btree_nr_keys nr; + BKEY_PADDED(k) tmp; + + memset(&nr, 0, sizeof(nr)); + + while ((k = bch_btree_node_iter_next_all(iter, src))) { + if (filter_whiteouts && bkey_packed_is_whiteout(src, k)) + continue; + + /* + * The filter might modify pointers, so we have to unpack the + * key and values to &tmp.k: + */ + bkey_unpack(&tmp.k, in_f, k); + + if (filter && filter(c, src, bkey_i_to_s(&tmp.k))) + continue; + + /* prev is always unpacked, for key merging: */ + + if (prev && + merge && 
+ merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE) + continue; + + /* + * the current key becomes the new prev: advance prev, then + * copy the current key - but first pack prev (in place): + */ + if (prev) { + bkey_pack(prev, (void *) prev, out_f); + + btree_keys_account_key_add(&nr, 0, prev); + prev = bkey_next(prev); + } else { + prev = bset_bkey_last(dst); + } + + bkey_copy(prev, &tmp.k); + } + + if (prev) { + bkey_pack(prev, (void *) prev, out_f); + btree_keys_account_key_add(&nr, 0, prev); + out = bkey_next(prev); + } else { + out = bset_bkey_last(dst); + } + + dst->u64s = cpu_to_le16((u64 *) out - dst->_data); + return nr; +} + +void bch_btree_sort_into(struct cache_set *c, + struct btree *dst, + struct btree *src) +{ + struct btree_nr_keys nr; + struct btree_node_iter src_iter; + u64 start_time = local_clock(); + + BUG_ON(dst->keys.nsets != 1); + + bch_bset_set_no_aux_tree(&dst->keys, dst->keys.set); + + bch_btree_node_iter_init_from_start(&src_iter, &src->keys, + btree_node_is_extents(src)); + + if (btree_node_ops(src)->key_normalize || + btree_node_ops(src)->key_merge) + nr = sort_repack_merge(c, dst->keys.set->data, + &src->keys, &src_iter, + &src->keys.format, + &dst->keys.format, + true, + btree_node_ops(src)->key_normalize, + btree_node_ops(src)->key_merge); + else + nr = sort_repack(dst->keys.set->data, + &src->keys, &src_iter, + &src->keys.format, + &dst->keys.format, + true); + + bch_time_stats_update(&c->btree_sort_time, start_time); + + dst->keys.nr.live_u64s += nr.live_u64s; + dst->keys.nr.bset_u64s[0] += nr.bset_u64s[0]; + dst->keys.nr.packed_keys += nr.packed_keys; + dst->keys.nr.unpacked_keys += nr.unpacked_keys; + + bch_verify_btree_nr_keys(&dst->keys); +} + #define SORT_CRIT (4096 / sizeof(u64)) /* @@ -179,7 +349,7 @@ static bool btree_node_compact(struct cache_set *c, struct btree *b, return false; sort: - btree_node_sort(c, b, iter, i, NULL, NULL, false); + btree_node_sort(c, b, iter, i, false); return true; } @@ -351,6 +521,9 @@ 
void bch_btree_node_read_done(struct cache_set *c, struct btree *b, struct btree_node_entry *bne; struct bset *i = &b->data->keys; struct btree_node_iter *iter; + struct btree_node *sorted; + bool used_mempool; + unsigned u64s; const char *err; int ret; @@ -458,11 +631,23 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b, if (bne->keys.seq == b->data->keys.seq) goto err; - btree_node_sort(c, b, NULL, 0, iter, - btree_node_is_extents(b) - ? bch_extent_sort_fix_overlapping - : bch_key_sort_fix_overlapping, - true); + sorted = btree_bounce_alloc(c, ilog2(btree_pages(c)), &used_mempool); + sorted->keys.u64s = 0; + + b->keys.nr = btree_node_is_extents(b) + ? bch_extent_sort_fix_overlapping(c, &sorted->keys, &b->keys, iter) + : bch_key_sort_fix_overlapping(&sorted->keys, &b->keys, iter); + + u64s = le16_to_cpu(sorted->keys.u64s); + *sorted = *b->data; + sorted->keys.u64s = cpu_to_le16(u64s); + swap(sorted, b->data); + b->keys.set->data = &b->data->keys; + b->keys.nsets = 1; + + BUG_ON(b->keys.nr.live_u64s != u64s); + + btree_bounce_free(c, ilog2(btree_pages(c)), used_mempool, sorted); bch_bset_build_aux_tree(&b->keys, b->keys.set, false); diff --git a/drivers/md/bcache/btree_io.h b/drivers/md/bcache/btree_io.h index 9fe2ec488537..75e514105e85 100644 --- a/drivers/md/bcache/btree_io.h +++ b/drivers/md/bcache/btree_io.h @@ -19,6 +19,8 @@ static inline void btree_node_io_lock(struct btree *b) TASK_UNINTERRUPTIBLE); } +void bch_btree_sort_into(struct cache_set *, struct btree *, struct btree *); + void bch_btree_build_aux_trees(struct btree *); void bch_btree_init_next(struct cache_set *, struct btree *, struct btree_iter *); diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c index 75d5ed086546..d9a0d5bf3b41 100644 --- a/drivers/md/bcache/btree_update.c +++ b/drivers/md/bcache/btree_update.c @@ -307,37 +307,6 @@ static struct btree *bch_btree_node_alloc(struct cache_set *c, return b; } -static void bch_btree_sort_into(struct 
cache_set *c, - struct btree *dst, - struct btree *src) -{ - struct btree_nr_keys nr; - struct btree_node_iter iter; - u64 start_time = local_clock(); - - BUG_ON(dst->keys.nsets != 1); - - bch_bset_set_no_aux_tree(&dst->keys, &dst->keys.set[0]); - - bch_btree_node_iter_init_from_start(&iter, &src->keys, - btree_node_is_extents(src)); - - nr = bch_sort_bsets(dst->keys.set->data, - &src->keys, &iter, - &src->keys.format, - &dst->keys.format, - btree_node_ops(src)->key_normalize, - btree_node_ops(src)->key_merge); - bch_time_stats_update(&c->btree_sort_time, start_time); - - dst->keys.nr.live_u64s += nr.live_u64s; - dst->keys.nr.bset_u64s[0] += nr.bset_u64s[0]; - dst->keys.nr.packed_keys += nr.packed_keys; - dst->keys.nr.unpacked_keys += nr.unpacked_keys; - - bch_verify_btree_nr_keys(&dst->keys); -} - struct btree *__btree_node_alloc_replacement(struct cache_set *c, struct btree *b, struct bkey_format format, diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index c192a389dc87..c25d90b23c31 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -22,7 +22,7 @@ #include <trace/events/bcache.h> static bool __bch_extent_normalize(struct cache_set *, struct bkey_s, bool); -static enum merge_result bch_extent_merge(struct btree_keys *, +static enum merge_result bch_extent_merge(struct cache_set *, struct btree_keys *, struct bkey_i *, struct bkey_i *); static void sort_key_next(struct btree_node_iter *iter, @@ -339,11 +339,10 @@ static void bch_extent_drop_stale(struct cache_set *c, struct bkey_s_extent e) bch_extent_drop_redundant_crcs(e); } -static bool bch_ptr_normalize(struct btree_keys *bk, struct bkey_s k) +static bool bch_ptr_normalize(struct cache_set *c, struct btree_keys *bk, + struct bkey_s k) { - struct btree *b = container_of(bk, struct btree, keys); - - return __bch_extent_normalize(b->c, k, false); + return __bch_extent_normalize(c, k, false); } static void bch_ptr_swab(const struct bkey_format *f, struct 
bkey_packed *k) @@ -753,7 +752,8 @@ static inline void extent_sort_next(struct btree_node_iter *iter, heap_sift(iter, i - iter->data, extent_sort_cmp); } -static void extent_sort_append(struct btree_keys *b, +static void extent_sort_append(struct cache_set *c, + struct btree_keys *b, struct btree_nr_keys *nr, struct bkey_packed *start, struct bkey_packed **prev, @@ -768,7 +768,7 @@ static void extent_sort_append(struct btree_keys *b, bkey_unpack(&tmp.k, f, k); if (*prev && - bch_extent_merge(b, (void *) *prev, &tmp.k)) + bch_extent_merge(c, b, (void *) *prev, &tmp.k)) return; if (*prev) { @@ -783,7 +783,8 @@ static void extent_sort_append(struct btree_keys *b, bkey_copy(*prev, &tmp.k); } -struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bset *dst, +struct btree_nr_keys bch_extent_sort_fix_overlapping(struct cache_set *c, + struct bset *dst, struct btree_keys *b, struct btree_node_iter *iter) { @@ -802,7 +803,7 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bset *dst, lk = __btree_node_offset_to_key(b, _l->k); if (iter->used == 1) { - extent_sort_append(b, &nr, dst->start, &prev, lk); + extent_sort_append(c, b, &nr, dst->start, &prev, lk); extent_sort_next(iter, b, _l); continue; } @@ -819,7 +820,7 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bset *dst, /* If current key and next key don't overlap, just append */ if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) { - extent_sort_append(b, &nr, dst->start, &prev, lk); + extent_sort_append(c, b, &nr, dst->start, &prev, lk); extent_sort_next(iter, b, _l); continue; } @@ -864,8 +865,8 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bset *dst, extent_sort_sift(iter, b, 0); - extent_sort_append(b, &nr, dst->start, &prev, - bkey_to_packed(&tmp.k)); + extent_sort_append(c, b, &nr, dst->start, &prev, + bkey_to_packed(&tmp.k)); } else { bch_cut_back(bkey_start_pos(r.k), l.k); extent_save(b, NULL, lk, l.k); @@ -1055,7 +1056,8 @@ enum extent_insert_hook_ret 
bch_extent_cmpxchg(struct extent_insert_hook *hook, } } -static bool bch_extent_merge_inline(struct btree_iter *iter, +static bool bch_extent_merge_inline(struct cache_set *, + struct btree_iter *, struct bkey_packed *, struct bkey_packed *, bool); @@ -1095,7 +1097,8 @@ static enum btree_insert_ret extent_insert_should_stop(struct btree_insert *tran return BTREE_INSERT_OK; } -static void extent_bset_insert(struct btree_iter *iter, struct bkey_i *insert) +static void extent_bset_insert(struct cache_set *c, struct btree_iter *iter, + struct bkey_i *insert) { struct btree *b = iter->nodes[0]; struct btree_node_iter *node_iter = &iter->node_iters[0]; @@ -1120,11 +1123,11 @@ static void extent_bset_insert(struct btree_iter *iter, struct bkey_i *insert) clobber_u64s = (u64 *) next_live_key - (u64 *) where; if (prev && - bch_extent_merge_inline(iter, prev, bkey_to_packed(insert), true)) + bch_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true)) goto drop_deleted_keys; if (next_live_key != bset_bkey_last(t->data) && - bch_extent_merge_inline(iter, bkey_to_packed(insert), + bch_extent_merge_inline(c, iter, bkey_to_packed(insert), next_live_key, false)) goto drop_deleted_keys; @@ -1137,12 +1140,13 @@ drop_deleted_keys: bch_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0); } -static void extent_insert_and_journal(struct btree_iter *iter, struct bkey_i *insert, +static void extent_insert_and_journal(struct cache_set *c, struct btree_iter *iter, + struct bkey_i *insert, struct journal_res *res) { bch_btree_journal_key(iter, insert, res); - extent_bset_insert(iter, insert); + extent_bset_insert(c, iter, insert); } static void extent_insert_committed(struct btree_insert *trans, @@ -1151,6 +1155,7 @@ static void extent_insert_committed(struct btree_insert *trans, struct journal_res *res, struct bucket_stats_cache_set *stats) { + struct cache_set *c = trans->c; struct btree_iter *iter = insert->iter; EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k->k), 
iter->pos)); @@ -1166,8 +1171,7 @@ static void extent_insert_committed(struct btree_insert *trans, if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY) && bkey_cmp(committed_pos, insert->k->k.p) && - bkey_extent_is_compressed(trans->c, - bkey_i_to_s_c(insert->k))) { + bkey_extent_is_compressed(c, bkey_i_to_s_c(insert->k))) { /* XXX: possibly need to increase our reservation? */ bch_cut_subtract_back(iter, committed_pos, bkey_i_to_s(&split.k), stats); @@ -1180,11 +1184,11 @@ static void extent_insert_committed(struct btree_insert *trans, bch_cut_front(committed_pos, insert->k); } - if (debug_check_bkeys(iter->c)) - bkey_debugcheck(iter->c, iter->nodes[iter->level], + if (debug_check_bkeys(c)) + bkey_debugcheck(c, iter->nodes[iter->level], bkey_i_to_s_c(&split.k)); - extent_insert_and_journal(iter, &split.k, res); + extent_insert_and_journal(c, iter, &split.k, res); bch_btree_iter_set_pos_same_leaf(iter, committed_pos); @@ -1513,7 +1517,7 @@ bch_insert_fixup_extent(struct btree_insert *trans, bch_add_sectors(iter, bkey_i_to_s_c(&split.k), bkey_start_offset(&split.k.k), split.k.k.size, &stats); - extent_bset_insert(iter, &split.k); + extent_bset_insert(c, iter, &split.k); break; } } @@ -2095,11 +2099,10 @@ void bch_extent_pick_ptr_avoiding(struct cache_set *c, struct bkey_s_c k, } } -static enum merge_result bch_extent_merge(struct btree_keys *bk, +static enum merge_result bch_extent_merge(struct cache_set *c, + struct btree_keys *bk, struct bkey_i *l, struct bkey_i *r) { - struct btree *b = container_of(bk, struct btree, keys); - struct cache_set *c = b->c; struct bkey_s_extent el, er; union bch_extent_entry *en_l, *en_r; @@ -2315,7 +2318,8 @@ do_fixup: * * Also unpacks and repacks. 
*/ -static bool bch_extent_merge_inline(struct btree_iter *iter, +static bool bch_extent_merge_inline(struct cache_set *c, + struct btree_iter *iter, struct bkey_packed *l, struct bkey_packed *r, bool back_merge) @@ -2343,7 +2347,7 @@ static bool bch_extent_merge_inline(struct btree_iter *iter, /* l & r should be in last bset: */ EBUG_ON(bch_bkey_to_bset(b, m) != t); - switch (bch_extent_merge(b, &li.k, &ri.k)) { + switch (bch_extent_merge(c, b, &li.k, &ri.k)) { case BCH_MERGE_NOMERGE: return false; case BCH_MERGE_PARTIAL: diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h index f3eb2a73e167..ccfb0d165401 100644 --- a/drivers/md/bcache/extents.h +++ b/drivers/md/bcache/extents.h @@ -14,7 +14,8 @@ struct btree_insert_entry; struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *, struct btree_keys *, struct btree_node_iter *); -struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bset *, +struct btree_nr_keys bch_extent_sort_fix_overlapping(struct cache_set *c, + struct bset *, struct btree_keys *, struct btree_node_iter *); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 52abe0aabcf6..23a6ccdecf77 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -897,7 +897,7 @@ static void cache_set_free(struct cache_set *c) bdi_destroy(&c->bdi); free_percpu(c->bucket_stats_lock.lock); free_percpu(c->bucket_stats_percpu); - mempool_exit(&c->btree_sort_pool); + mempool_exit(&c->btree_bounce_pool); mempool_exit(&c->bio_bounce_pages); bioset_exit(&c->bio_write); bioset_exit(&c->bio_read_split); @@ -1161,7 +1161,7 @@ static struct cache_set *bch_cache_set_alloc(struct cache_sb *sb, PAGE_SECTORS, 0) || !(c->bucket_stats_percpu = alloc_percpu(struct bucket_stats_cache_set)) || !(c->bucket_stats_lock.lock = alloc_percpu(*c->bucket_stats_lock.lock)) || - mempool_init_page_pool(&c->btree_sort_pool, 1, + mempool_init_page_pool(&c->btree_bounce_pool, 1, ilog2(btree_pages(c))) || 
bdi_setup_and_register(&c->bdi, "bcache") || bch_io_clock_init(&c->io_clock[READ]) || |