-rw-r--r--   drivers/md/bcache/bkey_methods.c |  20
-rw-r--r--   drivers/md/bcache/bkey_methods.h |  12
-rw-r--r--   drivers/md/bcache/btree.c        | 357
-rw-r--r--   drivers/md/bcache/btree.h        |  39
-rw-r--r--   drivers/md/bcache/debug.c        |   8
-rw-r--r--   drivers/md/bcache/debug.h        |   4
-rw-r--r--   drivers/md/bcache/extents.c      | 124
-rw-r--r--   drivers/md/bcache/extents.h      |  17
-rw-r--r--   drivers/md/bcache/gc.c           |  16
-rw-r--r--   drivers/md/bcache/inode.c        |   4
-rw-r--r--   drivers/md/bcache/io.c           |   2
-rw-r--r--   drivers/md/bcache/migrate.c      |   6
-rw-r--r--   drivers/md/bcache/request.c      |   2
13 files changed, 318 insertions(+), 293 deletions(-)
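
The core pattern in this patch: btree helpers that used to reach the cache set through the node's back-pointer (b->c) now take a struct cache_set *c argument and use it directly. A condensed before/after sketch, lifted from the btree_complete_write() hunk in btree.c below (the same hunk unfolded for readability, nothing beyond what the diff itself changes):

/* Before: the cache set is recovered from the btree node's back-pointer. */
static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->have_pin)
		journal_pin_drop(&b->c->journal, &w->journal);

	w->have_pin = false;
}

/* After: callers pass the cache set explicitly, so b->c is not needed here. */
static void btree_complete_write(struct cache_set *c, struct btree *b,
				 struct btree_write *w)
{
	if (w->have_pin)
		journal_pin_drop(&c->journal, &w->journal);

	w->have_pin = false;
}

The same signature change is threaded through the read/write, cache-reclaim (mca_*), split, and insert paths; relatedly, bch_btree_insert_at() and bch_btree_insert_node() drop their struct closure *persistent argument, with bch_btree_insert()/bch_btree_update() now flushing via bch_journal_push_seq() on the returned journal sequence instead.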
diff --git a/drivers/md/bcache/bkey_methods.c b/drivers/md/bcache/bkey_methods.c index a89d192e7cf0..90e1c9e7df38 100644 --- a/drivers/md/bcache/bkey_methods.c +++ b/drivers/md/bcache/bkey_methods.c @@ -43,7 +43,7 @@ bool bkey_invalid(struct cache_set *c, } } -void bkey_debugcheck(struct btree *b, struct bkey_s_c k) +void bkey_debugcheck(struct cache_set *c, struct btree *b, struct bkey_s_c k) { enum bkey_type type = btree_node_type(b); const struct bkey_ops *ops = bch_bkey_ops[type]; @@ -52,27 +52,27 @@ void bkey_debugcheck(struct btree *b, struct bkey_s_c k) cache_set_bug_on(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0, - b->c, "key before start of btree node"); + c, "key before start of btree node"); cache_set_bug_on(bkey_cmp(k.k->p, b->data->max_key) > 0, - b->c, "key past end of btree node"); + c, "key past end of btree node"); - if (bkey_invalid(b->c, type, k)) { + if (bkey_invalid(c, type, k)) { char buf[160]; - bch_bkey_val_to_text(b, buf, sizeof(buf), k); - cache_set_bug(b->c, "invalid bkey %s", buf); + bch_bkey_val_to_text(c, b, buf, sizeof(buf), k); + cache_set_bug(c, "invalid bkey %s", buf); return; } if (k.k->type >= KEY_TYPE_GENERIC_NR && ops->key_debugcheck) - ops->key_debugcheck(b, k); + ops->key_debugcheck(c, b, k); } -void bch_bkey_val_to_text(struct btree *b, char *buf, - size_t size, struct bkey_s_c k) +void bch_bkey_val_to_text(struct cache_set *c, struct btree *b, + char *buf, size_t size, struct bkey_s_c k) { enum bkey_type type = btree_node_type(b); const struct bkey_ops *ops = bch_bkey_ops[type]; @@ -83,6 +83,6 @@ void bch_bkey_val_to_text(struct btree *b, char *buf, if (k.k->type >= KEY_TYPE_GENERIC_NR && ops->val_to_text) { out += scnprintf(out, end - out, " -> "); - ops->val_to_text(b, out, end - out, k); + ops->val_to_text(c, out, end - out, k); } } diff --git a/drivers/md/bcache/bkey_methods.h b/drivers/md/bcache/bkey_methods.h index e55dacbcf068..b0d422752a70 100644 --- a/drivers/md/bcache/bkey_methods.h +++ b/drivers/md/bcache/bkey_methods.h @@ -17,16 +17,18 @@ struct bkey; struct bkey_ops { bool (*key_invalid)(const struct cache_set *, struct bkey_s_c); - void (*key_debugcheck)(struct btree *, struct bkey_s_c); - void (*val_to_text)(const struct btree *, char *, size_t, - struct bkey_s_c); + void (*key_debugcheck)(struct cache_set *, struct btree *, + struct bkey_s_c); + void (*val_to_text)(struct cache_set *, char *, + size_t, struct bkey_s_c); bool is_extents; }; bool bkey_invalid(struct cache_set *, enum bkey_type, struct bkey_s_c); -void bkey_debugcheck(struct btree *, struct bkey_s_c); -void bch_bkey_val_to_text(struct btree *, char *, size_t, struct bkey_s_c); +void bkey_debugcheck(struct cache_set *, struct btree *, struct bkey_s_c); +void bch_bkey_val_to_text(struct cache_set *, struct btree *, + char *, size_t, struct bkey_s_c); #undef DEF_BTREE_ID diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 8356a24b3fb2..1464f5bb67ea 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -55,8 +55,8 @@ const char *bch_btree_id_names[BTREE_ID_NR] = { static int bch_btree_iter_traverse(struct btree_iter *); static int __bch_btree_insert_node(struct btree *, struct btree_iter *, struct keylist *, struct bch_replace_info *, - struct closure *, u64 *, unsigned, - struct keylist *, struct closure *); + u64 *, unsigned, struct keylist *, + struct closure *); static inline void mark_btree_node_intent_locked(struct btree_iter *iter, unsigned level) @@ -191,15 +191,17 @@ bool bch_btree_iter_upgrade(struct btree_iter 
*iter) return true; } -static inline struct btree_node_entry *write_block(struct btree *b) +static inline struct btree_node_entry *write_block(struct cache_set *c, + struct btree *b) { BUG_ON(!b->written); - return (void *) b->data + (b->written << (b->c->block_bits + 9)); + return (void *) b->data + (b->written << (c->block_bits + 9)); } /* Returns true if we sorted (i.e. invalidated iterators */ -static void bch_btree_init_next(struct btree *b, struct btree_iter *iter) +static void bch_btree_init_next(struct cache_set *c, struct btree *b, + struct btree_iter *iter) { unsigned nsets = b->keys.nsets; bool sorted; @@ -208,9 +210,9 @@ static void bch_btree_init_next(struct btree *b, struct btree_iter *iter) /* If not a leaf node, always sort */ if (b->level && b->keys.nsets) - bch_btree_sort(&b->keys, &b->c->sort); + bch_btree_sort(&b->keys, &c->sort); else - bch_btree_sort_lazy(&b->keys, &b->c->sort); + bch_btree_sort_lazy(&b->keys, &c->sort); sorted = nsets != b->keys.nsets; @@ -219,10 +221,10 @@ static void bch_btree_init_next(struct btree *b, struct btree_iter *iter) * sort) and we sorted down to a single set: */ if (nsets && !b->keys.nsets) - bch_btree_verify(b); + bch_btree_verify(c, b); - if (b->written < btree_blocks(b->c)) - bch_bset_init_next(&b->keys, &write_block(b)->keys); + if (b->written < btree_blocks(c)) + bch_bset_init_next(&b->keys, &write_block(c, b)->keys); if (iter && sorted) btree_iter_node_set(iter, b); @@ -251,12 +253,12 @@ static void bch_btree_init_next(struct btree *b, struct btree_iter *iter) PTR_BUCKET_NR(ca, ptr), (b)->written, \ (i)->u64s, ##__VA_ARGS__) -static const char *validate_bset(struct btree *b, struct cache *ca, +static const char *validate_bset(struct cache_set *c, struct btree *b, + struct cache *ca, const struct bch_extent_ptr *ptr, struct bset *i, unsigned blocks) { struct bkey_format *f = &b->keys.format; - struct cache_set *c = b->c; struct bkey_packed *k; if (i->version != BCACHE_BSET_VERSION) @@ -296,7 +298,7 @@ static const char *validate_bset(struct btree *b, struct cache *ca, char buf[160]; bkey_disassemble(&tup, f, k); - bch_bkey_val_to_text(b, buf, sizeof(buf), + bch_bkey_val_to_text(c, b, buf, sizeof(buf), bkey_tup_to_s_c(&tup)); btree_node_error(b, ca, ptr, "invalid bkey %s", buf); @@ -314,17 +316,17 @@ static const char *validate_bset(struct btree *b, struct cache *ca, return NULL; } -void bch_btree_node_read_done(struct btree *b, struct cache *ca, +void bch_btree_node_read_done(struct cache_set *c, struct btree *b, + struct cache *ca, const struct bch_extent_ptr *ptr) { - struct cache_set *c = b->c; struct btree_node_entry *bne; struct bset *i = &b->data->keys; struct btree_node_iter *iter; const char *err; int ret; - iter = mempool_alloc(b->c->fill_iter, GFP_NOIO); + iter = mempool_alloc(c->fill_iter, GFP_NOIO); iter->used = 0; iter->is_extents = b->keys.ops->is_extents; @@ -373,7 +375,7 @@ void bch_btree_node_read_done(struct btree *b, struct cache *ca, b->data->keys.u64s, block_bytes(c)); } else { - bne = write_block(b); + bne = write_block(c, b); i = &bne->keys; if (i->seq != b->data->keys.seq) @@ -392,7 +394,7 @@ void bch_btree_node_read_done(struct btree *b, struct cache *ca, block_bytes(c)); } - err = validate_bset(b, ca, ptr, i, blocks); + err = validate_bset(c, b, ca, ptr, i, blocks); if (err) goto err; @@ -409,7 +411,7 @@ void bch_btree_node_read_done(struct btree *b, struct cache *ca, } err = "corrupted btree"; - for (bne = write_block(b); + for (bne = write_block(c, b); bset_byte_offset(b, bne) < btree_bytes(c); bne = 
(void *) bne + block_bytes(c)) if (bne->keys.seq == b->data->keys.seq) @@ -441,7 +443,7 @@ static void btree_node_read_endio(struct bio *bio) bch_bbio_endio(to_bbio(bio), bio->bi_error, "reading btree"); } -static void bch_btree_node_read(struct btree *b) +static void bch_btree_node_read(struct cache_set *c, struct btree *b) { uint64_t start_time = local_clock(); struct closure cl; @@ -453,7 +455,7 @@ static void bch_btree_node_read(struct btree *b) closure_init_stack(&cl); - ca = bch_btree_pick_ptr(b->c, b, &ptr); + ca = bch_btree_pick_ptr(c, b, &ptr); if (!ca) { set_btree_node_io_error(b); goto missing; @@ -461,8 +463,8 @@ static void bch_btree_node_read(struct btree *b) percpu_ref_get(&ca->ref); - bio = to_bbio(bch_bbio_alloc(b->c)); - bio->bio.bi_iter.bi_size = btree_bytes(b->c); + bio = to_bbio(bch_bbio_alloc(c)); + bio->bio.bi_iter.bi_size = btree_bytes(c); bio->bio.bi_end_io = btree_node_read_endio; bio->bio.bi_private = &cl; bio_set_op_attrs(&bio->bio, REQ_OP_READ, REQ_META|READ_SYNC); @@ -478,19 +480,19 @@ static void bch_btree_node_read(struct btree *b) bch_meta_read_fault("btree")) set_btree_node_io_error(b); - bch_bbio_free(&bio->bio, b->c); + bch_bbio_free(&bio->bio, c); if (btree_node_io_error(b)) goto err; - bch_btree_node_read_done(b, ca, ptr); - bch_time_stats_update(&b->c->btree_read_time, start_time); + bch_btree_node_read_done(c, b, ca, ptr); + bch_time_stats_update(&c->btree_read_time, start_time); percpu_ref_put(&ca->ref); return; missing: - bch_cache_set_error(b->c, "no cache device for btree node"); + bch_cache_set_error(c, "no cache device for btree node"); percpu_ref_put(&ca->ref); return; @@ -500,10 +502,11 @@ err: percpu_ref_put(&ca->ref); } -static void btree_complete_write(struct btree *b, struct btree_write *w) +static void btree_complete_write(struct cache_set *c, struct btree *b, + struct btree_write *w) { if (w->have_pin) - journal_pin_drop(&b->c->journal, &w->journal); + journal_pin_drop(&c->journal, &w->journal); w->have_pin = false; } @@ -522,7 +525,7 @@ static void __btree_node_write_done(struct closure *cl) bch_bbio_free(b->bio, c); b->bio = NULL; - btree_complete_write(b, w); + btree_complete_write(c, b, w); if (btree_node_dirty(b) && c->btree_flush_delay) schedule_delayed_work(&b->work, c->btree_flush_delay * HZ); @@ -556,6 +559,7 @@ static void btree_node_write_endio(struct bio *bio) static void do_btree_node_write(struct closure *cl) { struct btree *b = container_of(cl, struct btree, io); + struct cache_set *c = b->c; struct bset *i = btree_bset_last(b); BKEY_PADDED(key) k; struct bkey_s_extent e; @@ -566,7 +570,7 @@ static void do_btree_node_write(struct closure *cl) trace_bcache_btree_write(b); - BUG_ON(b->written >= btree_blocks(b->c)); + BUG_ON(b->written >= btree_blocks(c)); BUG_ON(b->written && !i->u64s); BUG_ON(btree_bset_first(b)->seq != i->seq); @@ -577,32 +581,32 @@ static void do_btree_node_write(struct closure *cl) i->version = BCACHE_BSET_VERSION; - SET_BSET_CSUM_TYPE(i, CACHE_PREFERRED_CSUM_TYPE(&b->c->sb)); + SET_BSET_CSUM_TYPE(i, CACHE_PREFERRED_CSUM_TYPE(&c->sb)); if (!b->written) { - BUG_ON(b->data->magic != bset_magic(&b->c->sb)); + BUG_ON(b->data->magic != bset_magic(&c->sb)); b->data->format = b->keys.format; data = b->data; b->data->csum = btree_csum_set(b, b->data); blocks_to_write = __set_blocks(b->data, b->data->keys.u64s, - block_bytes(b->c)); + block_bytes(c)); } else { - struct btree_node_entry *bne = write_block(b); + struct btree_node_entry *bne = write_block(c, b); data = bne; bne->csum = btree_csum_set(b, bne); 
blocks_to_write = __set_blocks(bne, bne->keys.u64s, - block_bytes(b->c)); + block_bytes(c)); } - BUG_ON(b->written + blocks_to_write > btree_blocks(b->c)); + BUG_ON(b->written + blocks_to_write > btree_blocks(c)); BUG_ON(b->bio); - b->bio = bch_bbio_alloc(b->c); + b->bio = bch_bbio_alloc(c); /* Take an extra reference so that the bio_put() in * btree_node_write_endio() doesn't call bio_free() */ @@ -610,7 +614,7 @@ static void do_btree_node_write(struct closure *cl) b->bio->bi_end_io = btree_node_write_endio; b->bio->bi_private = cl; - b->bio->bi_iter.bi_size = blocks_to_write << (b->c->block_bits + 9); + b->bio->bi_iter.bi_size = blocks_to_write << (c->block_bits + 9); bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA); bch_bio_map(b->bio, data); @@ -634,11 +638,11 @@ static void do_btree_node_write(struct closure *cl) extent_for_each_ptr(e, ptr) SET_PTR_OFFSET(ptr, PTR_OFFSET(ptr) + - (b->written << b->c->block_bits)); + (b->written << c->block_bits)); rcu_read_lock(); - extent_for_each_online_device(b->c, e, ptr, ca) - atomic_long_add(blocks_to_write << b->c->block_bits, + extent_for_each_online_device(c, e, ptr, ca) + atomic_long_add(blocks_to_write << c->block_bits, &ca->btree_sectors_written); rcu_read_unlock(); @@ -653,7 +657,7 @@ static void do_btree_node_write(struct closure *cl) memcpy(page_address(bv->bv_page), base + (j << PAGE_SHIFT), PAGE_SIZE); - bch_submit_bbio_replicas(b->bio, b->c, &k.key, 0, true); + bch_submit_bbio_replicas(b->bio, c, &k.key, 0, true); continue_at(cl, btree_node_write_done, NULL); } else { trace_bcache_btree_bounce_write_fail(b); @@ -661,7 +665,7 @@ static void do_btree_node_write(struct closure *cl) b->bio->bi_vcnt = 0; bch_bio_map(b->bio, data); - bch_submit_bbio_replicas(b->bio, b->c, &k.key, 0, true); + bch_submit_bbio_replicas(b->bio, c, &k.key, 0, true); closure_sync(cl); continue_at_nobarrier(cl, __btree_node_write_done, NULL); @@ -707,7 +711,7 @@ void bch_btree_node_write(struct btree *b, struct closure *parent, __bch_btree_node_write(b, parent, -1); six_lock_write(&b->lock); - bch_btree_init_next(b, iter); + bch_btree_init_next(b->c, b, iter); six_unlock_write(&b->lock); } @@ -774,7 +778,8 @@ restart: closure_sync(&cl); } -void bch_btree_push_journal_seq(struct btree *b, struct closure *cl) +void bch_btree_push_journal_seq(struct cache_set *c, struct btree *b, + struct closure *cl) { int i; @@ -782,7 +787,7 @@ void bch_btree_push_journal_seq(struct btree *b, struct closure *cl) u64 seq = b->keys.set[i].data->journal_seq; if (seq) { - bch_journal_push_seq(b->c, seq, cl); + bch_journal_push_seq(c, seq, cl); break; } } @@ -811,7 +816,7 @@ void bch_recalc_btree_reserve(struct cache_set *c) #define mca_can_free(c) \ max_t(int, 0, c->btree_cache_used - c->btree_cache_reserve) -static void mca_data_free(struct btree *b) +static void mca_data_free(struct cache_set *c, struct btree *b) { BUG_ON(b->io_mutex.count != 1); @@ -819,8 +824,8 @@ static void mca_data_free(struct btree *b) b->data = NULL; bch_btree_keys_free(&b->keys); - b->c->btree_cache_used--; - list_move(&b->list, &b->c->btree_cache_freed); + c->btree_cache_used--; + list_move(&b->list, &c->btree_cache_freed); } static const struct rhashtable_params bch_btree_cache_params = { @@ -830,7 +835,7 @@ static const struct rhashtable_params bch_btree_cache_params = { .hashfn = jhash, }; -static void mca_bucket_free(struct btree *b) +static void mca_bucket_free(struct cache_set *c, struct btree *b) { BUG_ON(btree_node_dirty(b)); 
BUG_ON(!list_empty_careful(&b->journal_seq_blacklisted)); @@ -838,17 +843,17 @@ static void mca_bucket_free(struct btree *b) b->keys.nsets = 0; b->keys.set[0].data = NULL; - rhashtable_remove_fast(&b->c->btree_cache_table, &b->hash, + rhashtable_remove_fast(&c->btree_cache_table, &b->hash, bch_btree_cache_params); /* Cause future lookups for this node to fail: */ bkey_i_to_extent(&b->key)->v.ptr[0]._val = 0; - list_move(&b->list, &b->c->btree_cache_freeable); + list_move(&b->list, &c->btree_cache_freeable); } -static void mca_data_alloc(struct btree *b, gfp_t gfp) +static void mca_data_alloc(struct cache_set *c, struct btree *b, gfp_t gfp) { - unsigned order = ilog2(b->c->btree_pages); + unsigned order = ilog2(c->btree_pages); b->data = (void *) __get_free_pages(gfp, order); if (!b->data) @@ -857,13 +862,13 @@ static void mca_data_alloc(struct btree *b, gfp_t gfp) if (bch_btree_keys_alloc(&b->keys, order, gfp)) goto err; - b->c->btree_cache_used++; - list_move(&b->list, &b->c->btree_cache_freeable); + c->btree_cache_used++; + list_move(&b->list, &c->btree_cache_freeable); return; err: free_pages((unsigned long) b->data, order); b->data = NULL; - list_move(&b->list, &b->c->btree_cache_freed); + list_move(&b->list, &c->btree_cache_freed); } static struct btree *mca_bucket_alloc(struct cache_set *c, gfp_t gfp) @@ -880,7 +885,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, gfp_t gfp) INIT_LIST_HEAD(&b->journal_seq_blacklisted); b->writes[1].index = 1; - mca_data_alloc(b, gfp); + mca_data_alloc(c, b, gfp); return b->data ? b : NULL; } @@ -888,13 +893,13 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, gfp_t gfp) * this version is for btree nodes that have already been freed (we're not * reaping a real btree node) */ -static int mca_reap_notrace(struct btree *b, bool flush) +static int mca_reap_notrace(struct cache_set *c, struct btree *b, bool flush) { struct closure cl; struct bset *i; closure_init_stack(&cl); - lockdep_assert_held(&b->c->btree_cache_lock); + lockdep_assert_held(&c->btree_cache_lock); if (!six_trylock_intent(&b->lock)) return -ENOMEM; @@ -908,7 +913,7 @@ static int mca_reap_notrace(struct btree *b, bool flush) b->io_mutex.count == 1 && !btree_node_dirty(b) && (((void *) i - (void *) b->data) >> - (b->c->block_bits + 9) >= b->written)); + (c->block_bits + 9) >= b->written)); /* XXX: we need a better solution for this, this will cause deadlocks */ if (!list_empty_careful(&b->journal_seq_blacklisted)) @@ -940,9 +945,9 @@ out_unlock_intent: return -ENOMEM; } -static int mca_reap(struct btree *b, bool flush) +static int mca_reap(struct cache_set *c, struct btree *b, bool flush) { - int ret = mca_reap_notrace(b, flush); + int ret = mca_reap_notrace(c, b, flush); trace_bcache_mca_reap(b, ret); return ret; @@ -993,8 +998,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, break; if (++i > 3 && - !mca_reap_notrace(b, false)) { - mca_data_free(b); + !mca_reap_notrace(c, b, false)) { + mca_data_free(c, b); six_unlock_write(&b->lock); six_unlock_intent(&b->lock); freed++; @@ -1012,9 +1017,9 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, } if (!b->accessed && - !mca_reap(b, false)) { - mca_bucket_free(b); - mca_data_free(b); + !mca_reap(c, b, false)) { + mca_bucket_free(c, b); + mca_data_free(c, b); six_unlock_write(&b->lock); six_unlock_intent(&b->lock); freed++; @@ -1081,10 +1086,10 @@ void bch_btree_cache_free(struct cache_set *c) b = list_first_entry(&c->btree_cache, struct btree, list); if (btree_node_dirty(b)) - 
btree_complete_write(b, btree_current_write(b)); + btree_complete_write(c, b, btree_current_write(b)); clear_btree_node_dirty(b); - mca_data_free(b); + mca_data_free(c, b); } while (!list_empty(&c->btree_cache_freed)) { @@ -1192,11 +1197,11 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct closure *cl) trace_bcache_mca_cannibalize(c, cl); list_for_each_entry_reverse(b, &c->btree_cache, list) - if (!mca_reap(b, false)) + if (!mca_reap(c, b, false)) goto out; list_for_each_entry_reverse(b, &c->btree_cache, list) - if (!mca_reap(b, true)) + if (!mca_reap(c, b, true)) goto out; /* @@ -1207,7 +1212,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct closure *cl) cond_resched(); } out: - mca_bucket_free(b); + mca_bucket_free(c, b); return b; } @@ -1242,15 +1247,15 @@ static struct btree *mca_alloc(struct cache_set *c, const struct bkey_i *k, * the list. Check if there's any freed nodes there: */ list_for_each_entry(b, &c->btree_cache_freeable, list) - if (!mca_reap_notrace(b, false)) + if (!mca_reap_notrace(c, b, false)) goto out; /* We never free struct btree itself, just the memory that holds the on * disk node. Check the freed list before allocating a new one: */ list_for_each_entry(b, &c->btree_cache_freed, list) - if (!mca_reap_notrace(b, false)) { - mca_data_alloc(b, __GFP_NOWARN|GFP_NOIO); + if (!mca_reap_notrace(c, b, false)) { + mca_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO); if (!b->data) goto err; else @@ -1280,7 +1285,7 @@ out: bch_btree_keys_init(&b->keys, b->level ? &bch_btree_interior_node_ops : bch_btree_ops[id], - &b->c->expensive_debug_checks); + &c->expensive_debug_checks); out_unlock: mutex_unlock(&c->btree_cache_lock); @@ -1324,7 +1329,7 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter, if (btree_node_read_locked(iter, level + 1)) btree_node_unlock(iter, level + 1); - bch_btree_node_read(b); + bch_btree_node_read(iter->c, b); six_unlock_write(&b->lock); if (btree_want_intent(iter, level)) { @@ -1433,7 +1438,7 @@ retry: /* Btree alloc */ -void btree_node_free(struct btree *b) +void btree_node_free(struct cache_set *c, struct btree *b) { trace_bcache_btree_node_free(b); @@ -1442,22 +1447,22 @@ void btree_node_free(struct btree *b) six_lock_write(&b->lock); if (btree_node_dirty(b)) - btree_complete_write(b, btree_current_write(b)); + btree_complete_write(c, b, btree_current_write(b)); clear_btree_node_dirty(b); if (!list_empty_careful(&b->journal_seq_blacklisted)) { - mutex_lock(&b->c->journal.blacklist_lock); + mutex_lock(&c->journal.blacklist_lock); list_del_init(&b->journal_seq_blacklisted); - mutex_unlock(&b->c->journal.blacklist_lock); + mutex_unlock(&c->journal.blacklist_lock); } cancel_delayed_work(&b->work); - bch_bucket_free(b->c, &b->key); + bch_bucket_free(c, &b->key); - mutex_lock(&b->c->btree_cache_lock); - mca_bucket_free(b); - mutex_unlock(&b->c->btree_cache_lock); + mutex_lock(&c->btree_cache_lock); + mca_bucket_free(c, b); + mutex_unlock(&c->btree_cache_lock); six_unlock_write(&b->lock); } @@ -1476,9 +1481,8 @@ void btree_node_free(struct btree *b) * is nothing new to be done. This just guarantees that there is a * journal write. 
*/ -static void bch_btree_set_root(struct btree *b) +static void bch_btree_set_root(struct cache_set *c, struct btree *b) { - struct cache_set *c = b->c; struct journal_res res; struct closure cl; struct btree *old; @@ -1548,12 +1552,13 @@ static struct btree *bch_btree_node_alloc(struct cache_set *c, int level, return b; } -struct btree *__btree_node_alloc_replacement(struct btree *b, +struct btree *__btree_node_alloc_replacement(struct cache_set *c, + struct btree *b, struct bkey_format format) { struct btree *n; - n = bch_btree_node_alloc(b->c, b->level, b->btree_id); + n = bch_btree_node_alloc(c, b->level, b->btree_id); n->data->min_key = b->data->min_key; n->data->max_key = b->data->max_key; @@ -1562,7 +1567,7 @@ struct btree *__btree_node_alloc_replacement(struct btree *b, bch_btree_sort_into(&n->keys, &b->keys, b->keys.ops->key_normalize, - &b->c->sort); + &c->sort); n->key.k.p = b->key.k.p; trace_bcache_btree_node_alloc_replacement(b, n); @@ -1611,7 +1616,8 @@ static struct bkey_format bch_btree_calc_format(struct btree *b) return bch_bkey_format_done(&s); } -struct btree *btree_node_alloc_replacement(struct btree *b) +struct btree *btree_node_alloc_replacement(struct cache_set *c, + struct btree *b) { struct bkey_format new_f = bch_btree_calc_format(b); @@ -1622,7 +1628,7 @@ struct btree *btree_node_alloc_replacement(struct btree *b) if (!btree_node_format_fits(b, &new_f)) new_f = b->keys.format; - return __btree_node_alloc_replacement(b, new_f); + return __btree_node_alloc_replacement(c, b, new_f); } static int __btree_check_reserve(struct cache_set *c, @@ -1674,13 +1680,13 @@ static int __btree_check_reserve(struct cache_set *c, return mca_cannibalize_lock(c, cl); } -int btree_check_reserve(struct btree *b, struct btree_iter *iter, - enum alloc_reserve reserve, +int btree_check_reserve(struct cache_set *c, struct btree *b, + struct btree_iter *iter, enum alloc_reserve reserve, unsigned extra_nodes, bool check_enospc) { unsigned depth = btree_node_root(b)->level - b->level; - return __btree_check_reserve(b->c, reserve, + return __btree_check_reserve(c, reserve, btree_reserve_required_nodes(depth) + extra_nodes, iter ? &iter->cl : NULL, check_enospc); } @@ -1719,7 +1725,7 @@ int bch_btree_root_alloc(struct cache_set *c, enum btree_id id, bch_btree_node_write(b, writes, NULL); - bch_btree_set_root(b); + bch_btree_set_root(c, b); six_unlock_intent(&b->lock); return 0; @@ -1739,7 +1745,7 @@ int bch_btree_root_read(struct cache_set *c, enum btree_id id, } BUG_ON(!b); - bch_btree_node_read(b); + bch_btree_node_read(c, b); six_unlock_write(&b->lock); if (btree_node_io_error(b)) { @@ -1747,7 +1753,7 @@ int bch_btree_root_read(struct cache_set *c, enum btree_id id, return -EIO; } - bch_btree_set_root(b); + bch_btree_set_root(c, b); six_unlock_intent(&b->lock); return 0; @@ -1761,6 +1767,7 @@ int bch_btree_root_read(struct cache_set *c, enum btree_id id, */ int bch_btree_node_rewrite(struct btree *b, struct btree_iter *iter, bool wait) { + struct cache_set *c = iter->c; struct btree *n, *parent = iter->nodes[b->level + 1]; struct closure cl; int ret; @@ -1771,16 +1778,16 @@ int bch_btree_node_rewrite(struct btree *b, struct btree_iter *iter, bool wait) if (!bch_btree_iter_upgrade(iter)) return -EINTR; - ret = btree_check_reserve(b, wait ? iter : NULL, + ret = btree_check_reserve(c, b, wait ? 
iter : NULL, iter->btree_id, 1, true); if (ret) { trace_bcache_btree_gc_rewrite_node_fail(b); return ret; } - bch_btree_push_journal_seq(b, &cl); + bch_btree_push_journal_seq(c, b, &cl); - n = btree_node_alloc_replacement(b); + n = btree_node_alloc_replacement(c, b); six_unlock_write(&n->lock); trace_bcache_btree_gc_rewrite_node(b); @@ -1791,13 +1798,13 @@ int bch_btree_node_rewrite(struct btree *b, struct btree_iter *iter, bool wait) if (parent) { ret = bch_btree_insert_node(parent, iter, &keylist_single(&n->key), - NULL, NULL, NULL, 0); + NULL, NULL, 0); BUG_ON(ret); } else { - bch_btree_set_root(n); + bch_btree_set_root(c, n); } - btree_node_free(b); + btree_node_free(iter->c, b); BUG_ON(iter->nodes[b->level] != b); @@ -1825,13 +1832,11 @@ static void btree_node_flush(struct journal_entry_pin *pin) * * The insert is journalled. */ -void bch_btree_insert_and_journal(struct btree *b, +void bch_btree_insert_and_journal(struct cache_set *c, struct btree *b, struct btree_node_iter *node_iter, struct bkey_i *insert, struct journal_res *res) { - struct cache_set *c = b->c; - bch_bset_insert(&b->keys, node_iter, insert); if (!btree_node_dirty(b)) { @@ -1877,8 +1882,7 @@ void bch_btree_insert_and_journal(struct btree *b, static bool btree_insert_key(struct btree_iter *iter, struct btree *b, struct keylist *insert_keys, struct bch_replace_info *replace, - struct journal_res *res, - unsigned flags) + struct journal_res *res, unsigned flags) { bool dequeue = false; struct btree_node_iter *node_iter = &iter->node_iters[b->level]; @@ -1900,15 +1904,15 @@ static bool btree_insert_key(struct btree_iter *iter, struct btree *b, if (bkey_cmp(insert->k.p, b->key.k.p) > 0) bch_cut_back(b->key.k.p, &insert->k); - do_insert = bch_insert_fixup_extent(b, insert, node_iter, - replace, &done, res, - flags); + do_insert = bch_insert_fixup_extent(iter->c, b, insert, + node_iter, replace, + &done, res, flags); bch_cut_front(done, orig); dequeue = (orig->k.size == 0); } else { BUG_ON(bkey_cmp(insert->k.p, b->key.k.p) > 0); - do_insert = bch_insert_fixup_key(b, insert, node_iter, + do_insert = bch_insert_fixup_key(iter->c, b, insert, node_iter, replace, &done, res); dequeue = true; } @@ -1977,9 +1981,7 @@ bch_btree_insert_keys(struct btree *b, struct btree_iter *iter, struct keylist *insert_keys, struct bch_replace_info *replace, - struct closure *persistent, - u64 *journal_seq, - unsigned flags) + u64 *journal_seq, unsigned flags) { bool done = false, inserted = false, need_split = false; struct journal_res res = { 0, 0 }; @@ -2012,7 +2014,7 @@ bch_btree_insert_keys(struct btree *b, /* just wrote a set? */ if (btree_node_need_init_next(b)) - bch_btree_init_next(b, iter); + bch_btree_init_next(iter->c, b, iter); while (!bch_keylist_empty(insert_keys)) { k = bch_keylist_front(insert_keys); @@ -2045,9 +2047,7 @@ bch_btree_insert_keys(struct btree *b, six_unlock_write(&b->lock); if (res.ref) - bch_journal_res_put(iter->c, &res, - bch_keylist_empty(insert_keys) - ? 
persistent : NULL); + bch_journal_res_put(iter->c, &res, NULL); } if (inserted && b->written) { @@ -2082,12 +2082,11 @@ bch_btree_insert_keys(struct btree *b, static int btree_split(struct btree *b, struct btree_iter *iter, struct keylist *insert_keys, - struct bch_replace_info *replace, - struct closure *persistent, unsigned flags, struct keylist *parent_keys, struct closure *stack_cl) { + struct cache_set *c = iter->c; struct btree *parent = iter->nodes[b->level + 1]; struct btree *n1, *n2 = NULL, *n3 = NULL; struct bset *set1, *set2; @@ -2102,7 +2101,7 @@ static int btree_split(struct btree *b, BUG_ON(!btree_node_intent_locked(iter, btree_node_root(b)->level)); /* After this check we cannot return -EAGAIN anymore */ - ret = btree_check_reserve(b, iter, iter->btree_id, 0, + ret = btree_check_reserve(c, b, iter, iter->btree_id, 0, !(flags & BTREE_INSERT_NOFAIL)); if (ret) { /* If splitting an interior node, we've already split a leaf, @@ -2115,26 +2114,26 @@ static int btree_split(struct btree *b, WARN(1, "insufficient reserve for split\n"); } - bch_btree_push_journal_seq(b, stack_cl); + bch_btree_push_journal_seq(c, b, stack_cl); - n1 = btree_node_alloc_replacement(b); + n1 = btree_node_alloc_replacement(c, b); set1 = btree_bset_first(n1); if (__set_blocks(n1->data, n1->data->keys.u64s + u64s_to_insert, - block_bytes(n1->c)) > btree_blocks(iter->c) * 3 / 4) { + block_bytes(n1->c)) > btree_blocks(c) * 3 / 4) { size_t nr_packed = 0, nr_unpacked = 0; trace_bcache_btree_node_split(b, set1->u64s); - n2 = bch_btree_node_alloc(iter->c, b->level, + n2 = bch_btree_node_alloc(c, b->level, iter->btree_id); n2->data->max_key = n1->data->max_key; n2->keys.format = n1->keys.format; set2 = btree_bset_first(n2); if (!parent) - n3 = __btree_root_alloc(iter->c, b->level + 1, + n3 = __btree_root_alloc(c, b->level + 1, iter->btree_id); /* @@ -2199,12 +2198,12 @@ static int btree_split(struct btree *b, if (b->level) { btree_iter_node_set(iter, n1); status = bch_btree_insert_keys(n1, iter, insert_keys, - replace, NULL, NULL, 0); + NULL, NULL, 0); BUG_ON(status == BTREE_INSERT_NEED_SPLIT); btree_iter_node_set(iter, n2); status = bch_btree_insert_keys(n2, iter, insert_keys, - replace, NULL, NULL, 0); + NULL, NULL, 0); BUG_ON(status == BTREE_INSERT_NEED_SPLIT); BUG_ON(!bch_keylist_empty(insert_keys)); iter->nodes[b->level] = b; /* still have b locked */ @@ -2225,9 +2224,9 @@ static int btree_split(struct btree *b, * old node, but not the node we just created, mark it: */ six_lock_write(&b->lock); - if (gc_will_visit_node(iter->c, n2) && - !gc_will_visit_node(iter->c, n1)) - btree_gc_mark_node(iter->c, n1, NULL); + if (gc_will_visit_node(c, n2) && + !gc_will_visit_node(c, n1)) + btree_gc_mark_node(c, n1, NULL); six_unlock_write(&b->lock); } else { trace_bcache_btree_node_compact(b, set1->u64s); @@ -2236,7 +2235,7 @@ static int btree_split(struct btree *b, if (b->level) { btree_iter_node_set(iter, n1); status = bch_btree_insert_keys(n1, iter, insert_keys, - replace, NULL, NULL, 0); + NULL, NULL, 0); BUG_ON(status != BTREE_INSERT_INSERTED); BUG_ON(!bch_keylist_empty(insert_keys)); iter->nodes[b->level] = b; /* still have b locked */ @@ -2255,7 +2254,7 @@ static int btree_split(struct btree *b, btree_iter_node_set(iter, n3); bch_btree_insert_keys(n3, iter, parent_keys, - NULL, NULL, NULL, 0); + NULL, NULL, 0); bch_btree_node_write(n3, stack_cl, NULL); /* @@ -2266,7 +2265,7 @@ static int btree_split(struct btree *b, closure_sync(stack_cl); - bch_btree_set_root(n3); + bch_btree_set_root(c, n3); } else if (!parent) { 
BUG_ON(parent_keys->start_keys_p != &parent_keys->inline_keys[0]); @@ -2275,18 +2274,17 @@ static int btree_split(struct btree *b, /* Root filled up but didn't need to be split */ closure_sync(stack_cl); - bch_btree_set_root(n1); + bch_btree_set_root(c, n1); } else { /* Split a non root node */ closure_sync(stack_cl); - ret = __bch_btree_insert_node(parent, iter, parent_keys, - NULL, NULL, NULL, 0, - parent_keys, stack_cl); + ret = __bch_btree_insert_node(parent, iter, parent_keys, NULL, + NULL, 0, parent_keys, stack_cl); BUG_ON(ret || !bch_keylist_empty(parent_keys)); } - btree_node_free(b); + btree_node_free(c, b); /* Update iterator, and finish insert now that new nodes are visible: */ BUG_ON(iter->nodes[b->level] != b); @@ -2302,7 +2300,7 @@ static int btree_split(struct btree *b, btree_iter_node_set(iter, n1); } - bch_time_stats_update(&iter->c->btree_split_time, start_time); + bch_time_stats_update(&c->btree_split_time, start_time); return 0; } @@ -2311,9 +2309,7 @@ static int __bch_btree_insert_node(struct btree *b, struct btree_iter *iter, struct keylist *insert_keys, struct bch_replace_info *replace, - struct closure *persistent, - u64 *journal_seq, - unsigned flags, + u64 *journal_seq, unsigned flags, struct keylist *split_keys, struct closure *stack_cl) { @@ -2324,8 +2320,7 @@ static int __bch_btree_insert_node(struct btree *b, BUG_ON(b->level && replace); BUG_ON(!b->written); - if (bch_btree_insert_keys(b, iter, insert_keys, replace, - persistent, journal_seq, + if (bch_btree_insert_keys(b, iter, insert_keys, replace, journal_seq, flags) == BTREE_INSERT_NEED_SPLIT) { if (!b->level) { iter->locks_want = BTREE_MAX_DEPTH; @@ -2333,8 +2328,8 @@ static int __bch_btree_insert_node(struct btree *b, return -EINTR; } - return btree_split(b, iter, insert_keys, replace, persistent, - flags, split_keys, stack_cl); + return btree_split(b, iter, insert_keys, flags, + split_keys, stack_cl); } return 0; @@ -2357,9 +2352,7 @@ int bch_btree_insert_node(struct btree *b, struct btree_iter *iter, struct keylist *insert_keys, struct bch_replace_info *replace, - struct closure *persistent, - u64 *journal_seq, - unsigned flags) + u64 *journal_seq, unsigned flags) { struct closure stack_cl; struct keylist split_keys; @@ -2371,7 +2364,7 @@ int bch_btree_insert_node(struct btree *b, flags |= FAIL_IF_STALE; return __bch_btree_insert_node(b, iter, insert_keys, replace, - persistent, journal_seq, flags, + journal_seq, flags, &split_keys, &stack_cl); } @@ -2411,9 +2404,7 @@ int bch_btree_insert_node(struct btree *b, int bch_btree_insert_at(struct btree_iter *iter, struct keylist *insert_keys, struct bch_replace_info *replace, - struct closure *persistent, - u64 *journal_seq, - unsigned flags) + u64 *journal_seq, unsigned flags) { int ret = -EINTR; @@ -2428,16 +2419,28 @@ int bch_btree_insert_at(struct btree_iter *iter, while (1) { ret = bch_btree_insert_node(iter->nodes[0], iter, insert_keys, - replace, persistent, - journal_seq, flags); -traverse: - if (ret == -EAGAIN) - bch_btree_iter_unlock(iter); + replace, journal_seq, flags); - if (bch_keylist_empty(insert_keys) || - ret == -EROFS) + /* + * We don't test against success because we might have + * successfully inserted the keys on the keylist, but have more + * to insert in the next leaf node: + */ + if (likely(bch_keylist_empty(insert_keys))) break; + /* + * -EAGAIN means we have to drop locks and wait on + * mca_cannibalize_lock - btree_iter_unlock() does this + */ + if (ret == -EAGAIN) + bch_btree_iter_unlock(iter); + else if (ret && ret != -EINTR) + 
break; +traverse: + /* + * Can't retry, make sure we return an error: + */ if (flags & BTREE_INSERT_ATOMIC) { ret = ret ?: -EINTR; break; @@ -2484,7 +2487,7 @@ int bch_btree_insert_check_key(struct btree_iter *iter, __btree_iter_node_set(iter, iter->nodes[0], bkey_start_pos(&check_key->k)); - return bch_btree_insert_at(iter, &keylist_single(&tmp.key), NULL, + return bch_btree_insert_at(iter, &keylist_single(&tmp.key), NULL, NULL, BTREE_INSERT_ATOMIC); } @@ -2500,8 +2503,12 @@ int bch_btree_insert(struct cache_set *c, enum btree_id id, struct closure *persistent, u64 *journal_seq, int flags) { struct btree_iter iter; + u64 jseq = 0; int ret, ret2; + if (!journal_seq) + journal_seq = &jseq; + bch_btree_iter_init_intent(&iter, c, id, bkey_start_pos(&bch_keylist_front(keys)->k)); @@ -2510,9 +2517,12 @@ int bch_btree_insert(struct cache_set *c, enum btree_id id, goto out; ret = bch_btree_insert_at(&iter, keys, replace, - persistent, journal_seq, flags); + journal_seq, flags); out: ret2 = bch_btree_iter_unlock(&iter); + if (persistent) + bch_journal_push_seq(c, *journal_seq, persistent); + return ret ?: ret2; } @@ -2525,8 +2535,12 @@ int bch_btree_update(struct cache_set *c, enum btree_id id, struct bkey_i *k, { struct btree_iter iter; struct bkey_s_c u; + u64 jseq = 0; int ret, ret2; + if (!journal_seq) + journal_seq = &jseq; + EBUG_ON(id == BTREE_ID_EXTENTS); bch_btree_iter_init_intent(&iter, c, id, k->k.p); @@ -2539,9 +2553,12 @@ int bch_btree_update(struct cache_set *c, enum btree_id id, struct bkey_i *k, BUG_ON(!u.k || bkey_deleted(u.k)); ret = bch_btree_insert_at(&iter, &keylist_single(k), NULL, - persistent, journal_seq, 0); + journal_seq, 0); out: ret2 = bch_btree_iter_unlock(&iter); + if (persistent) + bch_journal_push_seq(c, *journal_seq, persistent); + return ret ?: ret2; } @@ -2576,7 +2593,7 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter) ret = bkey_tup_to_s_c(&iter->tup); if (expensive_debug_checks(iter->c)) - bkey_debugcheck(iter->nodes[iter->level], ret); + bkey_debugcheck(iter->c, iter->nodes[iter->level], ret); return ret; } @@ -2596,7 +2613,7 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter) ret = bkey_tup_to_s_c(&iter->tup); if (expensive_debug_checks(iter->c)) - bkey_debugcheck(iter->nodes[iter->level], ret); + bkey_debugcheck(iter->c, iter->nodes[iter->level], ret); return ret; } diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 557c2f77fa29..8cb60f63726e 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -104,8 +104,6 @@ struct btree { struct six_lock lock; - struct cache_set *c; - unsigned long flags; u16 written; /* would be nice to kill */ u8 level; @@ -114,17 +112,20 @@ struct btree { struct btree_keys keys; struct btree_node *data; + struct cache_set *c; + + /* lru list */ + struct list_head list; + /* For outstanding btree writes, used as a lock - protects write_idx */ struct closure io; struct semaphore io_mutex; - - struct list_head list; struct delayed_work work; - struct list_head journal_seq_blacklisted; - struct btree_write writes[2]; struct bio *bio; + + struct list_head journal_seq_blacklisted; }; #define BTREE_FLAG(flag) \ @@ -393,14 +394,15 @@ static inline void bch_btree_iter_cond_resched(struct btree_iter *iter) #define btree_node_root(_b) ((_b)->c->btree_roots[(_b)->btree_id]) -void btree_node_free(struct btree *); +void btree_node_free(struct cache_set *, struct btree *); void bch_btree_node_write(struct btree *, struct closure *, struct btree_iter *); 
-void bch_btree_node_read_done(struct btree *, struct cache *, - const struct bch_extent_ptr *); +void bch_btree_node_read_done(struct cache_set *, struct btree *, + struct cache *, const struct bch_extent_ptr *); void bch_btree_flush(struct cache_set *); -void bch_btree_push_journal_seq(struct btree *, struct closure *); +void bch_btree_push_journal_seq(struct cache_set *, struct btree *, + struct closure *); /** * btree_node_format_fits - check if we could rewrite node with a new format @@ -430,17 +432,19 @@ static inline bool btree_node_format_fits(struct btree *b, void __bch_btree_calc_format(struct bkey_format_state *, struct btree *); -struct btree *__btree_node_alloc_replacement(struct btree *, +struct btree *__btree_node_alloc_replacement(struct cache_set *, + struct btree *, struct bkey_format); -struct btree *btree_node_alloc_replacement(struct btree *); -int btree_check_reserve(struct btree *, struct btree_iter *, - enum alloc_reserve, unsigned, bool); +struct btree *btree_node_alloc_replacement(struct cache_set *, struct btree *); +int btree_check_reserve(struct cache_set *c, struct btree *, + struct btree_iter *, enum alloc_reserve, + unsigned, bool); int bch_btree_root_alloc(struct cache_set *, enum btree_id, struct closure *); int bch_btree_root_read(struct cache_set *, enum btree_id, const struct bkey_i *, unsigned); -void bch_btree_insert_and_journal(struct btree *, +void bch_btree_insert_and_journal(struct cache_set *, struct btree *, struct btree_node_iter *, struct bkey_i *, struct journal_res *); @@ -449,7 +453,7 @@ struct bch_replace_info; int bch_btree_insert_node(struct btree *, struct btree_iter *, struct keylist *, struct bch_replace_info *, - struct closure *, u64 *, unsigned); + u64 *, unsigned); /* * Don't drop/retake locks: instead return -EINTR if need to upgrade to intent @@ -469,8 +473,7 @@ int bch_btree_insert_node(struct btree *, struct btree_iter *, #define FAIL_IF_STALE (1 << 2) int bch_btree_insert_at(struct btree_iter *, struct keylist *, - struct bch_replace_info *, struct closure *, - u64 *, unsigned); + struct bch_replace_info *, u64 *, unsigned); int bch_btree_insert_check_key(struct btree_iter *, struct bkey_i *); int bch_btree_insert(struct cache_set *, enum btree_id, struct keylist *, struct bch_replace_info *, struct closure *, diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 469190daf63d..1ab7ddc20636 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -30,9 +30,8 @@ static void btree_verify_endio(struct bio *bio) closure_put(cl); } -void bch_btree_verify(struct btree *b) +void bch_btree_verify(struct cache_set *c, struct btree *b) { - struct cache_set *c = b->c; struct btree *v = c->verify_data; struct btree_node *n_ondisk, *n_sorted, *n_inmemory; struct bset *sorted, *inmemory; @@ -82,7 +81,7 @@ void bch_btree_verify(struct btree *b) memcpy(n_ondisk, n_sorted, btree_bytes(c)); - bch_btree_node_read_done(v, ca, ptr); + bch_btree_node_read_done(c, v, ca, ptr); n_sorted = c->verify_data->data; percpu_ref_put(&ca->ref); @@ -268,7 +267,8 @@ static ssize_t bch_read_btree(struct file *file, char __user *buf, bch_btree_iter_init(&iter, i->c, i->id, i->from); while ((k = bch_btree_iter_peek(&iter)).k) { - bch_bkey_val_to_text(iter.nodes[0], i->buf, sizeof(i->buf), k); + bch_bkey_val_to_text(i->c, iter.nodes[0], i->buf, + sizeof(i->buf), k); i->bytes = strlen(i->buf); BUG_ON(i->bytes >= PAGE_SIZE); i->buf[i->bytes] = '\n'; diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h index 
83dad863dc17..a3567a2e10c9 100644 --- a/drivers/md/bcache/debug.h +++ b/drivers/md/bcache/debug.h @@ -7,7 +7,7 @@ struct cache_set; #ifdef CONFIG_BCACHE_DEBUG -void bch_btree_verify(struct btree *); +void bch_btree_verify(struct cache_set *, struct btree *); void bch_data_verify(struct cached_dev *, struct bio *); #define expensive_debug_checks(c) ((c)->expensive_debug_checks) @@ -16,7 +16,7 @@ void bch_data_verify(struct cached_dev *, struct bio *); #else /* DEBUG */ -static inline void bch_btree_verify(struct btree *b) {} +static inline void bch_btree_verify(struct cache_set *c, struct btree *b) {} static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} #define expensive_debug_checks(c) 0 diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 1384843c094f..2e9e80266143 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -115,7 +115,8 @@ void bch_key_sort_fix_overlapping(struct btree_keys *b, /* This returns true if insert should be inserted, false otherwise */ -bool bch_insert_fixup_key(struct btree *b, struct bkey_i *insert, +bool bch_insert_fixup_key(struct cache_set *c, struct btree *b, + struct bkey_i *insert, struct btree_node_iter *iter, struct bch_replace_info *replace, struct bpos *done, @@ -123,13 +124,13 @@ bool bch_insert_fixup_key(struct btree *b, struct bkey_i *insert, { const struct bkey_format *f = &b->keys.format; struct bkey_packed *k; - int c; + int cmp; BUG_ON(replace); while ((k = bch_btree_node_iter_peek_all(iter, &b->keys)) && - (c = bkey_cmp_packed(f, k, &insert->k)) <= 0) { - if (!c && !bkey_deleted(k)) { + (cmp = bkey_cmp_packed(f, k, &insert->k)) <= 0) { + if (!cmp && !bkey_deleted(k)) { k->type = KEY_TYPE_DELETED; btree_keys_account_key_drop(&b->keys, k); } @@ -137,7 +138,7 @@ bool bch_insert_fixup_key(struct btree *b, struct bkey_i *insert, bch_btree_node_iter_next_all(iter, &b->keys); } - bch_btree_insert_and_journal(b, iter, insert, res); + bch_btree_insert_and_journal(c, b, iter, insert, res); return true; } @@ -181,7 +182,8 @@ static bool should_drop_ptr(const struct cache_set *c, return (ca = PTR_CACHE(c, ptr)) && ptr_stale(ca, ptr); } -unsigned bch_extent_nr_ptrs_after_normalize(const struct btree *b, +unsigned bch_extent_nr_ptrs_after_normalize(struct cache_set *c, + const struct btree *b, const struct bkey_packed *k) { const struct bkey_format *f = &b->keys.format; @@ -204,7 +206,7 @@ unsigned bch_extent_nr_ptrs_after_normalize(const struct btree *b, rcu_read_lock(); for (ptr = 0; ptr < bkeyp_extent_ptrs(f, k); ptr++) - if (!should_drop_ptr(b->c, e, &e->ptr[ptr], + if (!should_drop_ptr(c, e, &e->ptr[ptr], bkeyp_extent_ptrs(f, k))) ret++; rcu_read_unlock(); @@ -342,10 +344,9 @@ static const char *bch_ptr_status(const struct cache_set *c, return ""; } -static void bch_extent_to_text(const struct btree *b, char *buf, +static void bch_extent_to_text(struct cache_set *c, char *buf, size_t size, struct bkey_s_c k) { - struct cache_set *c = b->c; struct bkey_s_c_extent e; char *out = buf, *end = buf + size; const struct bch_extent_ptr *ptr; @@ -386,11 +387,11 @@ static bool bch_btree_ptr_invalid(const struct cache_set *c, struct bkey_s_c k) __ptr_invalid(c, k); } -static void btree_ptr_debugcheck(struct btree *b, struct bkey_s_c k) +static void btree_ptr_debugcheck(struct cache_set *c, struct btree *b, + struct bkey_s_c k) { struct bkey_s_c_extent e = bkey_s_c_to_extent(k); const struct bch_extent_ptr *ptr; - struct cache_set *c = b->c; unsigned seq; const char *err; char buf[160]; @@ 
-404,7 +405,7 @@ static void btree_ptr_debugcheck(struct btree *b, struct bkey_s_c k) } if (bch_extent_ptrs(e) < CACHE_SET_META_REPLICAS_HAVE(&c->sb)) { - bch_bkey_val_to_text(b, buf, sizeof(buf), k); + bch_bkey_val_to_text(c, b, buf, sizeof(buf), k); cache_set_bug(c, "btree key bad (too few replicas, %u < %llu): %s", bch_extent_ptrs(e), @@ -437,7 +438,7 @@ static void btree_ptr_debugcheck(struct btree *b, struct bkey_s_c k) return; err: - bch_bkey_val_to_text(b, buf, sizeof(buf), k); + bch_bkey_val_to_text(c, b, buf, sizeof(buf), k); btree_bug(b, "%s btree pointer %s: bucket %zi prio %i " "gen %i last_gc %i mark %08x", err, buf, PTR_BUCKET_NR(ca, ptr), @@ -838,53 +839,55 @@ stale: return -1; } -static int bch_add_sectors(struct btree *b, struct bkey_s_c k, - u64 offset, int sectors, bool fail_if_stale) +static int bch_add_sectors(struct cache_set *c, struct btree *b, + struct bkey_s_c k, u64 offset, + int sectors, bool fail_if_stale) { if (sectors && k.k->type == BCH_EXTENT) { struct bkey_s_c_extent e = bkey_s_c_to_extent(k); int ret; - ret = __bch_add_sectors(b->c, b, e, offset, + ret = __bch_add_sectors(c, b, e, offset, sectors, fail_if_stale); if (ret) return ret; if (!EXTENT_CACHED(e.v)) - bcache_dev_sectors_dirty_add(b->c, e.k->p.inode, + bcache_dev_sectors_dirty_add(c, e.k->p.inode, offset, sectors); } return 0; } -static void bch_subtract_sectors(struct btree *b, struct bkey_s_c k, - u64 offset, int sectors) +static void bch_subtract_sectors(struct cache_set *c, struct btree *b, + struct bkey_s_c k, u64 offset, int sectors) { - bch_add_sectors(b, k, offset, -sectors, false); + bch_add_sectors(c, b, k, offset, -sectors, false); } /* These wrappers subtract exactly the sectors that we're removing from @k */ -static void bch_cut_subtract_back(struct btree *b, struct bpos where, - struct bkey_s k) +static void bch_cut_subtract_back(struct cache_set *c, struct btree *b, + struct bpos where, struct bkey_s k) { - bch_subtract_sectors(b, bkey_s_to_s_c(k), where.offset, + bch_subtract_sectors(c, b, bkey_s_to_s_c(k), where.offset, k.k->p.offset - where.offset); bch_cut_back(where, k.k); } -static void bch_cut_subtract_front(struct btree *b, struct bpos where, - struct bkey_s k) +static void bch_cut_subtract_front(struct cache_set *c, struct btree *b, + struct bpos where, struct bkey_s k) { - bch_subtract_sectors(b, bkey_s_to_s_c(k), bkey_start_offset(k.k), + bch_subtract_sectors(c, b, bkey_s_to_s_c(k), bkey_start_offset(k.k), where.offset - bkey_start_offset(k.k)); __bch_cut_front(where, k); } -static void bch_drop_subtract(struct btree *b, struct bkey_s k) +static void bch_drop_subtract(struct cache_set *c, struct btree *b, + struct bkey_s k) { if (k.k->size) - bch_subtract_sectors(b, bkey_s_to_s_c(k), + bch_subtract_sectors(c, b, bkey_s_to_s_c(k), bkey_start_offset(k.k), k.k->size); k.k->size = 0; __set_bkey_deleted(k.k); @@ -975,7 +978,7 @@ try_partial: * On return, there is room in @res for at least one more key of the same size * as @new. */ -static bool bkey_cmpxchg(struct btree *b, +static bool bkey_cmpxchg(struct cache_set *c, struct btree *b, struct btree_node_iter *iter, struct bkey_s_c k, struct bch_replace_info *replace, @@ -1015,13 +1018,13 @@ static bool bkey_cmpxchg(struct btree *b, * * The [**] are already known to match, so insert them. 
*/ - bch_btree_insert_and_journal(b, iter, + bch_btree_insert_and_journal(c, b, iter, bch_key_split(*done, new), res); *inserted = true; } - bch_cut_subtract_front(b, bkey_start_pos(k.k), + bch_cut_subtract_front(c, b, bkey_start_pos(k.k), bkey_i_to_s(new)); /* advance @done from the end of prev key to the start of @k */ *done = bkey_start_pos(k.k); @@ -1043,7 +1046,7 @@ static bool bkey_cmpxchg(struct btree *b, * * The [**] are already known to match, so insert them. */ - bch_btree_insert_and_journal(b, iter, + bch_btree_insert_and_journal(c, b, iter, bch_key_split(*done, new), res); *inserted = true; @@ -1051,9 +1054,9 @@ static bool bkey_cmpxchg(struct btree *b, /* update @new to be the part we haven't checked yet */ if (bkey_cmp(k.k->p, new->k.p) > 0) - bch_drop_subtract(b, bkey_i_to_s(new)); + bch_drop_subtract(c, b, bkey_i_to_s(new)); else - bch_cut_subtract_front(b, k.k->p, bkey_i_to_s(new)); + bch_cut_subtract_front(c, b, k.k->p, bkey_i_to_s(new)); } else replace->successes += 1; @@ -1063,7 +1066,7 @@ static bool bkey_cmpxchg(struct btree *b, } /* We are trying to insert a key with an older version than the existing one */ -static void handle_existing_key_newer(struct btree *b, +static void handle_existing_key_newer(struct cache_set *c, struct btree *b, struct btree_node_iter *iter, struct bkey_i *insert, const struct bkey *k, @@ -1077,12 +1080,12 @@ static void handle_existing_key_newer(struct btree *b, switch (bch_extent_overlap(k, &insert->k)) { case BCH_EXTENT_OVERLAP_FRONT: /* k and insert share the start, remove it from insert */ - bch_cut_subtract_front(b, k->p, bkey_i_to_s(insert)); + bch_cut_subtract_front(c, b, k->p, bkey_i_to_s(insert)); break; case BCH_EXTENT_OVERLAP_BACK: /* k and insert share the end, remove it from insert */ - bch_cut_subtract_back(b, bkey_start_pos(k), + bch_cut_subtract_back(c, b, bkey_start_pos(k), bkey_i_to_s(insert)); break; @@ -1102,14 +1105,14 @@ static void handle_existing_key_newer(struct btree *b, * entry to @res. */ split = bch_key_split(bkey_start_pos(k), insert), - bch_cut_subtract_front(b, k->p, bkey_i_to_s(insert)); - bch_btree_insert_and_journal(b, iter, split, res); + bch_cut_subtract_front(c, b, k->p, bkey_i_to_s(insert)); + bch_btree_insert_and_journal(c, b, iter, split, res); *inserted = true; break; case BCH_EXTENT_OVERLAP_ALL: /* k completely covers insert -- drop insert */ - bch_drop_subtract(b, bkey_i_to_s(insert)); + bch_drop_subtract(c, b, bkey_i_to_s(insert)); break; } } @@ -1157,7 +1160,8 @@ static void handle_existing_key_newer(struct btree *b, * If the end of done is not the same as the end of insert, then * key insertion needs to continue/be retried. */ -bool bch_insert_fixup_extent(struct btree *b, struct bkey_i *insert, +bool bch_insert_fixup_extent(struct cache_set *c, struct btree *b, + struct bkey_i *insert, struct btree_node_iter *iter, struct bch_replace_info *replace, struct bpos *done, @@ -1202,7 +1206,7 @@ bool bch_insert_fixup_extent(struct btree *b, struct bkey_i *insert, * can also insert keys with stale pointers, but for those we still need * to proceed with the insertion. 
*/ - if (bch_add_sectors(b, bkey_i_to_s_c(insert), + if (bch_add_sectors(c, b, bkey_i_to_s_c(insert), bkey_start_offset(&insert->k), insert->k.size, !!(flags & FAIL_IF_STALE))) { /* We raced - a dirty pointer was stale */ @@ -1240,7 +1244,7 @@ bool bch_insert_fixup_extent(struct btree *b, struct bkey_i *insert, * XXX: would be better to explicitly signal that we * need to split */ - bch_cut_subtract_back(b, *done, bkey_i_to_s(insert)); + bch_cut_subtract_back(c, b, *done, bkey_i_to_s(insert)); goto out; } @@ -1255,13 +1259,13 @@ bool bch_insert_fixup_extent(struct btree *b, struct bkey_i *insert, *done = bkey_cmp(k.k->p, insert->k.p) < 0 ? k.k->p : insert->k.p; else if (k.k->size && - !bkey_cmpxchg(b, iter, bkey_s_to_s_c(k), replace, + !bkey_cmpxchg(c, b, iter, bkey_s_to_s_c(k), replace, insert, done, &inserted, res)) continue; if (k.k->size && insert->k.version && insert->k.version < k.k->version) { - handle_existing_key_newer(b, iter, insert, k.k, + handle_existing_key_newer(c, b, iter, insert, k.k, &inserted, res); continue; } @@ -1271,13 +1275,14 @@ bool bch_insert_fixup_extent(struct btree *b, struct bkey_i *insert, switch (bch_extent_overlap(&insert->k, k.k)) { case BCH_EXTENT_OVERLAP_FRONT: /* insert and k share the start, invalidate in k */ - bch_cut_subtract_front(b, insert->k.p, k); + bch_cut_subtract_front(c, b, insert->k.p, k); extent_save(_k, k.k, f); break; case BCH_EXTENT_OVERLAP_BACK: /* insert and k share the end, invalidate in k */ - bch_cut_subtract_back(b, bkey_start_pos(&insert->k), k); + bch_cut_subtract_back(c, b, + bkey_start_pos(&insert->k), k); extent_save(_k, k.k, f); /* @@ -1294,7 +1299,7 @@ bool bch_insert_fixup_extent(struct btree *b, struct bkey_i *insert, if (!bkey_deleted(_k)) btree_keys_account_key_drop(&b->keys, _k); - bch_drop_subtract(b, k); + bch_drop_subtract(c, b, k); k.k->p = bkey_start_pos(&insert->k); extent_save(_k, k.k, f); @@ -1321,7 +1326,7 @@ bool bch_insert_fixup_extent(struct btree *b, struct bkey_i *insert, bch_cut_back(bkey_start_pos(&insert->k), &split.k.k); __bch_cut_front(bkey_start_pos(&insert->k), k); - bch_cut_subtract_front(b, insert->k.p, k); + bch_cut_subtract_front(c, b, insert->k.p, k); extent_save(_k, k.k, f); bch_bset_insert(&b->keys, iter, &split.k); @@ -1337,14 +1342,14 @@ bool bch_insert_fixup_extent(struct btree *b, struct bkey_i *insert, * we've processed, i.e. 
what insert was) */ if (replace != NULL) - bch_cut_subtract_back(b, *done, bkey_i_to_s(insert)); + bch_cut_subtract_back(c, b, *done, bkey_i_to_s(insert)); *done = orig_insert; } out: if (insert->k.size) { - bch_btree_insert_and_journal(b, iter, insert, res); + bch_btree_insert_and_journal(c, b, iter, insert, res); inserted = true; } @@ -1358,12 +1363,12 @@ static bool bch_extent_invalid(const struct cache_set *c, struct bkey_s_c k) __ptr_invalid(c, k); } -static void bch_extent_debugcheck(struct btree *b, struct bkey_s_c k) +static void bch_extent_debugcheck(struct cache_set *c, struct btree *b, + struct bkey_s_c k) { struct bkey_s_c_extent e = bkey_s_c_to_extent(k); const struct bch_extent_ptr *ptr; struct cache_member_rcu *mi; - struct cache_set *c = b->c; struct cache *ca; struct bucket *g; unsigned seq, stale; @@ -1376,7 +1381,7 @@ static void bch_extent_debugcheck(struct btree *b, struct bkey_s_c k) if (!EXTENT_CACHED(e.v) && bch_extent_ptrs(e) < CACHE_SET_DATA_REPLICAS_HAVE(&c->sb)) { - bch_bkey_val_to_text(b, buf, sizeof(buf), k); + bch_bkey_val_to_text(c, b, buf, sizeof(buf), k); cache_set_bug(c, "extent key bad (too few replicas, %u < %llu): %s", bch_extent_ptrs(e), @@ -1442,7 +1447,7 @@ static void bch_extent_debugcheck(struct btree *b, struct bkey_s_c k) replicas = CACHE_SET_DATA_REPLICAS_WANT(&c->sb); for (i = 0; i < CACHE_TIERS; i++) if (ptrs_per_tier[i] > replicas) { - bch_bkey_val_to_text(b, buf, sizeof(buf), k); + bch_bkey_val_to_text(c, b, buf, sizeof(buf), k); cache_set_bug(c, "extent key bad (too many tier %u replicas): %s", i, buf); @@ -1453,14 +1458,14 @@ static void bch_extent_debugcheck(struct btree *b, struct bkey_s_c k) return; bad_device: - bch_bkey_val_to_text(b, buf, sizeof(buf), k); + bch_bkey_val_to_text(c, b, buf, sizeof(buf), k); cache_set_bug(c, "extent pointer %u device missing: %s", (unsigned) (ptr - e.v->ptr), buf); cache_member_info_put(); return; bad_ptr: - bch_bkey_val_to_text(b, buf, sizeof(buf), k); + bch_bkey_val_to_text(c, b, buf, sizeof(buf), k); cache_set_bug(c, "extent pointer %u bad gc mark: %s:\nbucket %zu prio %i " "gen %i last_gc %i mark 0x%08x", (unsigned) (ptr - e.v->ptr), buf, PTR_BUCKET_NR(ca, ptr), @@ -1621,11 +1626,12 @@ static enum merge_result bch_extent_merge(struct btree_keys *bk, struct bkey_i *l, struct bkey_i *r) { struct btree *b = container_of(bk, struct btree, keys); + struct cache_set *c = b->c; struct bkey_s_extent el, er; struct cache *ca; unsigned i; - if (key_merging_disabled(b->c)) + if (key_merging_disabled(c)) return BCH_MERGE_NOMERGE; /* @@ -1666,7 +1672,7 @@ static enum merge_result bch_extent_merge(struct btree_keys *bk, * size so we can't check */ rcu_read_lock(); - if (!(ca = PTR_CACHE(b->c, &el.v->ptr[i])) || + if (!(ca = PTR_CACHE(c, &el.v->ptr[i])) || PTR_BUCKET_NR(ca, &el.v->ptr[i]) != PTR_BUCKET_NR(ca, &er.v->ptr[i])) { rcu_read_unlock(); diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h index 8c032e52b17b..e8bef3083890 100644 --- a/drivers/md/bcache/extents.h +++ b/drivers/md/bcache/extents.h @@ -10,10 +10,9 @@ void bch_key_sort_fix_overlapping(struct btree_keys *, struct bset *, void bch_extent_sort_fix_overlapping(struct btree_keys *, struct bset *, struct btree_node_iter *); -bool bch_insert_fixup_key(struct btree *, struct bkey_i *, - struct btree_node_iter *, - struct bch_replace_info *, - struct bpos *, +bool bch_insert_fixup_key(struct cache_set *, struct btree *, + struct bkey_i *, struct btree_node_iter *, + struct bch_replace_info *, struct bpos *, struct journal_res *); 
extern const struct bkey_ops bch_bkey_btree_ops; @@ -38,13 +37,13 @@ static inline struct cache *bch_extent_pick_ptr(struct cache_set *c, return bch_extent_pick_ptr_avoiding(c, k, ptr, NULL); } -bool bch_insert_fixup_extent(struct btree *, struct bkey_i *, - struct btree_node_iter *, +bool bch_insert_fixup_extent(struct cache_set *, struct btree *, + struct bkey_i *, struct btree_node_iter *, struct bch_replace_info *, struct bpos *, - struct journal_res *, - unsigned); + struct journal_res *, unsigned); -unsigned bch_extent_nr_ptrs_after_normalize(const struct btree *, +unsigned bch_extent_nr_ptrs_after_normalize(struct cache_set *, + const struct btree *, const struct bkey_packed *); void bch_extent_drop_stale(struct cache_set *c, struct bkey_s); bool bch_extent_normalize(struct cache_set *, struct bkey_s); diff --git a/drivers/md/bcache/gc.c b/drivers/md/bcache/gc.c index db433b50ec63..8a8d645374c8 100644 --- a/drivers/md/bcache/gc.c +++ b/drivers/md/bcache/gc.c @@ -118,14 +118,14 @@ bool btree_gc_mark_node(struct cache_set *c, struct btree *b, for_each_btree_node_key(&b->keys, k, &iter) { bkey_disassemble(&tup, f, k); - bkey_debugcheck(b, bkey_tup_to_s_c(&tup)); + bkey_debugcheck(c, b, bkey_tup_to_s_c(&tup)); stale = max(stale, btree_mark_key(c, b, bkey_tup_to_s_c(&tup))); keys++; - u64s = bch_extent_nr_ptrs_after_normalize(b, k); + u64s = bch_extent_nr_ptrs_after_normalize(c, b, k); if (stat && u64s) { good_keys++; @@ -420,7 +420,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], block_bytes(c)) > blocks) return; - if (btree_check_reserve(parent, NULL, iter->btree_id, + if (btree_check_reserve(c, parent, NULL, iter->btree_id, nr_old_nodes, false) || bch_keylist_realloc(&keylist, (BKEY_U64s + BKEY_EXTENT_MAX_U64s) * nr_old_nodes)) { @@ -447,12 +447,12 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], for (i = 0; i < nr_old_nodes; i++) { closure_sync(&cl); - bch_btree_push_journal_seq(old_nodes[i], &cl); + bch_btree_push_journal_seq(c, old_nodes[i], &cl); } /* Repack everything with @new_format and sort down to one bset */ for (i = 0; i < nr_old_nodes; i++) - new_nodes[i] = __btree_node_alloc_replacement(old_nodes[i], + new_nodes[i] = __btree_node_alloc_replacement(c, old_nodes[i], new_format); /* @@ -489,7 +489,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], s1->u64s += s2->u64s; six_unlock_write(&n2->lock); - btree_node_free(n2); + btree_node_free(c, n2); six_unlock_intent(&n2->lock); memmove(new_nodes + i - 1, @@ -556,7 +556,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], /* Insert the newly coalesced nodes */ ret = bch_btree_insert_node(parent, iter, &keylist, - NULL, NULL, NULL, 0); + NULL, NULL, 0); BUG_ON(ret || !bch_keylist_empty(&keylist)); iter->pos = saved_pos; @@ -567,7 +567,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], /* Free the old nodes and update our sliding window */ for (i = 0; i < nr_old_nodes; i++) { - btree_node_free(old_nodes[i]); + btree_node_free(c, old_nodes[i]); six_unlock_intent(&old_nodes[i]->lock); old_nodes[i] = new_nodes[i]; } diff --git a/drivers/md/bcache/inode.c b/drivers/md/bcache/inode.c index f54c89428574..cbec5028e1d2 100644 --- a/drivers/md/bcache/inode.c +++ b/drivers/md/bcache/inode.c @@ -78,7 +78,7 @@ static bool bch_inode_invalid(const struct cache_set *c, struct bkey_s_c k) } } -static void bch_inode_to_text(const struct btree *b, char *buf, +static void bch_inode_to_text(struct cache_set *c, char *buf, 
size_t size, struct bkey_s_c k) { struct bkey_s_c_inode inode; @@ -127,7 +127,7 @@ again: inode->k.p.inode, inode->k.u64s); ret = bch_btree_insert_at(&iter, &keylist_single(inode), - NULL, NULL, NULL, + NULL, NULL, BTREE_INSERT_ATOMIC); if (ret == -EINTR || ret == -EAGAIN) diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 2cb909fa60ce..70051a06e706 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -769,7 +769,7 @@ int bch_discard(struct cache_set *c, struct bpos start, n = erase.k.p; ret = bch_btree_insert_at(&iter, &keylist_single(&erase), - NULL, NULL, NULL, 0); + NULL, NULL, 0); if (ret) break; diff --git a/drivers/md/bcache/migrate.c b/drivers/md/bcache/migrate.c index 7fe4935d6da9..bbe792ec0db5 100644 --- a/drivers/md/bcache/migrate.c +++ b/drivers/md/bcache/migrate.c @@ -552,10 +552,8 @@ static int bch_flag_key_bad(struct btree_iter *iter, */ bch_extent_normalize(c, bkey_i_to_s(&tmp.key)); - return bch_btree_insert_at(iter, - &keylist_single(&tmp.key), - NULL, NULL, NULL, - BTREE_INSERT_ATOMIC); + return bch_btree_insert_at(iter, &keylist_single(&tmp.key), + NULL, NULL, BTREE_INSERT_ATOMIC); } /* diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 01febc9f5154..b43126105212 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -345,7 +345,7 @@ static int cached_dev_cache_miss(struct btree_iter *iter, struct search *s, if (!(bio->bi_opf & REQ_RAHEAD) && !(bio->bi_opf & REQ_META) && ((u64) sectors_available(dc->disk.c) * 100 < - (u64) b->c->capacity * CUTOFF_CACHE_READA)) + (u64) iter->c->capacity * CUTOFF_CACHE_READA)) reada = min_t(sector_t, dc->readahead >> 9, bdev_sectors(bio->bi_bdev) - bio_end_sector(bio)); #endif |