diff options
-rw-r--r-- | drivers/md/bcache/btree.c | 71 | ||||
-rw-r--r-- | drivers/md/bcache/btree.h | 19 | ||||
-rw-r--r-- | drivers/md/bcache/request.c | 72 | ||||
-rw-r--r-- | drivers/md/bcache/request.h | 1 |
4 files changed, 89 insertions, 74 deletions
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 7ccad24d1343..f57ff433c4b8 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1177,15 +1177,13 @@ static int __btree_check_reserve(struct cache_set *c, struct btree_op *op, struct btree *bch_btree_root_alloc(struct cache_set *c, enum btree_id id) { - struct closure cl; struct btree_op op; bch_btree_op_init(&op, id, SHRT_MAX); - closure_init_stack(&cl); while (1) { - if (__btree_check_reserve(c, &op, id, 1, &cl)) { - closure_sync(&cl); + if (__btree_check_reserve(c, &op, id, 1, &op.cl)) { + closure_sync(&op.cl); continue; } @@ -1193,13 +1191,13 @@ struct btree *bch_btree_root_alloc(struct cache_set *c, enum btree_id id) } } -static int btree_check_reserve(struct btree *b, struct btree_op *op, - struct closure *cl) +static int btree_check_reserve(struct btree *b, struct btree_op *op) { enum btree_id id = b->btree_id; unsigned required = (b->c->btree_roots[id]->level - b->level) * 2 + 1; - return __btree_check_reserve(b->c, op, id, required, cl); + return __btree_check_reserve(b->c, op, id, required, + op ? 
&op->cl : NULL); } /* Garbage collection */ @@ -1321,7 +1319,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, bch_keylist_init(&keylist); /* If we can't allocate new nodes, just keep going */ - if (btree_check_reserve(b, NULL, NULL)) + if (btree_check_reserve(b, NULL)) return 0; memset(new_nodes, 0, sizeof(new_nodes)); @@ -1352,7 +1350,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, * before as an optimization to potentially avoid a bunch of expensive * allocs/sorts */ - if (btree_check_reserve(b, NULL, NULL)) + if (btree_check_reserve(b, NULL)) goto out_nocoalesce; for (i = 0; i < nodes; i++) @@ -1443,7 +1441,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, } /* Insert the newly coalesced nodes */ - bch_btree_insert_node(b, op, &keylist, NULL, NULL, false); + bch_btree_insert_node(b, op, &keylist, NULL, false); BUG_ON(!bch_keylist_empty(&keylist)); /* Free the old nodes and update our sliding window */ @@ -1490,14 +1488,14 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, struct keylist keys; struct btree *n; - if (btree_check_reserve(b, NULL, NULL)) + if (btree_check_reserve(b, NULL)) return 0; n = btree_node_alloc_replacement(replace, NULL); BUG_ON(!n); /* recheck reserve after allocating replacement node */ - if (btree_check_reserve(b, NULL, NULL)) { + if (btree_check_reserve(b, NULL)) { btree_node_free(n); rw_unlock(true, n); return 0; @@ -1508,7 +1506,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, bch_keylist_init(&keys); bch_keylist_add(&keys, &n->key); - bch_btree_insert_node(b, op, &keys, NULL, NULL, false); + bch_btree_insert_node(b, op, &keys, NULL, false); BUG_ON(!bch_keylist_empty(&keys)); btree_node_free(replace); @@ -2073,7 +2071,7 @@ enum btree_insert_status { static enum btree_insert_status bch_btree_insert_keys(struct btree *b, struct btree_op *op, struct keylist *insert_keys, struct bkey *replace_key, - struct closure 
*parent, bool flush) + bool flush) { bool inserted = false, attempted = false, need_split = false; int oldsize = bch_count_data(&b->keys); @@ -2145,7 +2143,7 @@ bch_btree_insert_keys(struct btree *b, struct btree_op *op, if (journal_write) bch_journal_write_put(b->c, journal_write, (bch_keylist_empty(insert_keys) && flush) - ? parent : NULL); + ? &op->cl : NULL); if (attempted && !inserted) op->insert_collision = true; @@ -2160,7 +2158,6 @@ bch_btree_insert_keys(struct btree *b, struct btree_op *op, static int btree_split(struct btree *b, struct btree_op *op, struct keylist *insert_keys, struct bkey *replace_key, - struct closure *parent, bool flush) { struct btree *n1, *n2 = NULL, *n3 = NULL; @@ -2175,7 +2172,7 @@ static int btree_split(struct btree *b, struct btree_op *op, bch_keylist_init(&parent_keys); /* After this check we cannot return -EAGAIN anymore */ - ret = btree_check_reserve(b, op, parent); + ret = btree_check_reserve(b, op); if (ret) { /* If splitting an interior node, we've already split a leaf, * so we should have checked for sufficient reserve. We can't @@ -2207,8 +2204,7 @@ static int btree_split(struct btree *b, struct btree_op *op, * version of the btree node. 
*/ if (b->level) { - bch_btree_insert_keys(n1, op, insert_keys, replace_key, - parent, flush); + bch_btree_insert_keys(n1, op, insert_keys, replace_key, flush); /* * There might be duplicate (deleted) keys after the @@ -2289,7 +2285,7 @@ static int btree_split(struct btree *b, struct btree_op *op, /* Depth increases, make a new root */ mutex_lock(&n3->write_lock); bkey_copy_key(&n3->key, &MAX_KEY); - bch_btree_insert_keys(n3, op, &parent_keys, NULL, NULL, false); + bch_btree_insert_keys(n3, op, &parent_keys, NULL, false); bch_btree_node_write(n3, &cl); mutex_unlock(&n3->write_lock); @@ -2304,8 +2300,7 @@ static int btree_split(struct btree *b, struct btree_op *op, /* Split a non root node */ closure_sync(&cl); - bch_btree_insert_node(b->parent, op, &parent_keys, - NULL, NULL, false); + bch_btree_insert_node(b->parent, op, &parent_keys, NULL, false); BUG_ON(!bch_keylist_empty(&parent_keys)); } @@ -2325,7 +2320,8 @@ static int btree_split(struct btree *b, struct btree_op *op, * @op: pointer to struct btree_op * @insert_keys: list of keys to insert * @replace_key: old key for compare exchange - * @parent: closure will wait on last key to be inserted + * @flush: if true, @op->cl won't return until last key written to + * journal * * This is top level for common btree insertion/index update code. The control * flow goes roughly like: @@ -2348,13 +2344,12 @@ static int btree_split(struct btree *b, struct btree_op *op, * if the full list is inserted. * * Return values: - * -EAGAIN: @parent was put on a waitlist waiting for btree node allocation. + * -EAGAIN: @op->cl was put on a waitlist waiting for btree node allocation. * -EINTR: locking changed, this function should be called again. 
*/ int bch_btree_insert_node(struct btree *b, struct btree_op *op, struct keylist *insert_keys, struct bkey *replace_key, - struct closure *parent, bool flush) { struct closure cl; @@ -2368,8 +2363,7 @@ int bch_btree_insert_node(struct btree *b, struct btree_op *op, btree_node_lock_for_insert(b); op->iterator_invalidated = 1; - switch (bch_btree_insert_keys(b, op, insert_keys, - replace_key, parent, flush)) { + switch (bch_btree_insert_keys(b, op, insert_keys, replace_key, flush)) { case BTREE_INSERT_NO_INSERT: mutex_unlock(&b->write_lock); return 0; @@ -2407,8 +2401,7 @@ int bch_btree_insert_node(struct btree *b, struct btree_op *op, return -EINTR; } else { return btree_split(b, op, insert_keys, - replace_key, flush); + replace_key, flush); } default: BUG(); @@ -2426,12 +2419,11 @@ int bch_btree_insert_node_sync(struct btree *b, struct btree_op *op, struct closure cl; int ret; - closure_init_stack(&cl); + closure_init_stack(&op->cl); while (1) { ret = bch_btree_insert_node(b, op, insert_keys, - replace_key, &cl, - false); + replace_key, false); if (ret == -EAGAIN) - closure_sync(&cl); + closure_sync(&op->cl); else @@ -2448,11 +2440,11 @@ int bch_btree_insert_node_sync(struct btree *b, struct btree_op *op, * lose and not overwrite the key with stale data.
* * Return values: - * -EAGAIN: @cl was put on a waitlist waiting for btree node allocation + * -EAGAIN: @op.cl was put on a waitlist waiting for btree node allocation * -EINTR: btree node was changed while upgrading to write lock */ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, - struct bkey *check_key, struct closure *cl) + struct bkey *check_key) { int ret = -EINTR; u64 btree_ptr = b->key.val[0]; @@ -2481,7 +2473,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, bch_keylist_add(&insert, check_key); - ret = bch_btree_insert_node(b, op, &insert, NULL, cl, false); + ret = bch_btree_insert_node(b, op, &insert, NULL, false); out: if (upgrade) downgrade_write(&b->lock); @@ -2489,7 +2481,6 @@ out: } struct btree_insert_op { - struct closure cl; struct btree_op op; struct keylist *keys; struct bkey *replace_key; @@ -2501,8 +2492,7 @@ static int btree_insert_fn(struct btree_op *b_op, struct btree *b) struct btree_insert_op, op); int ret = bch_btree_insert_node(b, &op->op, op->keys, - op->replace_key, &op->cl, - false); + op->replace_key, false); return bch_keylist_empty(op->keys) ? 
MAP_DONE : ret; } @@ -2520,9 +2510,6 @@ int bch_btree_insert(struct cache_set *c, enum btree_id id, struct btree_insert_op op; int ret = 0; - BUG_ON(bch_keylist_empty(keys)); - - closure_init_stack(&op.cl); bch_btree_op_init(&op.op, id, 0); op.keys = keys; op.replace_key = replace_key; @@ -2538,7 +2525,7 @@ int bch_btree_insert(struct cache_set *c, enum btree_id id, } if (ret == -EAGAIN) - closure_sync(&op.cl); + closure_sync(&op.op.cl); else if (op.op.insert_collision) return -ESRCH; diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index c3b7972dff22..3bd5a17ecac8 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -197,6 +197,8 @@ static inline void set_gc_sectors(struct cache_set *c) /* Recursing down the btree */ struct btree_op { + struct closure cl; + enum btree_id id; /* For allocating new nodes */ @@ -215,24 +217,33 @@ struct btree_op { }; /** + * __bch_btree_op_init - initialize btree op + * * @write_lock_level: -1 for read locks only * 0 for write lock on leaf * SHRT_MAX for write locks only + * + * Does not initialize @op->cl -- you must do that yourself. 
*/ static inline void __bch_btree_op_init(struct btree_op *op, enum btree_id id, enum alloc_reserve reserve, int write_lock_level) { - memset(op, 0, sizeof(struct btree_op)); op->id = id; op->reserve = reserve; init_wait(&op->wait); op->lock = write_lock_level; + op->iterator_invalidated = 0; + op->insert_collision = 0; } +/** + * bch_btree_op_init - initialize synchronous btree op + */ static inline void bch_btree_op_init(struct btree_op *op, enum btree_id id, - int write_lock_level) + int write_lock_level) { + closure_init_stack(&op->cl); __bch_btree_op_init(op, id, id, write_lock_level); } @@ -265,11 +276,11 @@ struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *, struct btree *); int bch_btree_insert_check_key(struct btree *, struct btree_op *, - struct bkey *, struct closure *); + struct bkey *); int bch_btree_insert(struct cache_set *, enum btree_id, struct keylist *, struct bkey *); int bch_btree_insert_node(struct btree *, struct btree_op *, struct keylist *, - struct bkey *, struct closure *, bool); + struct bkey *, bool); int bch_btree_insert_node_sync(struct btree *, struct btree_op *, struct keylist *, struct bkey *); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index a616e8f1cf3a..7827352f3719 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -80,36 +80,15 @@ static int btree_insert_fn(struct btree_op *b_op, struct btree *b) struct bkey *replace_key = op->replace ? &op->replace_key : NULL; int ret = bch_btree_insert_node(b, &op->op, &op->insert_keys, - replace_key, &op->cl, - op->flush); + replace_key, op->flush); return bch_keylist_empty(&op->insert_keys) ? 
MAP_DONE : ret; } -/** - * bch_data_insert_keys - insert extent btree keys for a write - */ -static void bch_data_insert_keys(struct closure *cl) +static void bch_data_insert_keys_done(struct closure *cl) { struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); - struct keylist *keys = &op->insert_keys; - enum btree_id id = BTREE_ID_EXTENTS; - enum alloc_reserve reserve; - int ret = 0; unsigned i; - reserve = bch_btree_reserve(op); - __bch_btree_op_init(&op->op, id, reserve, 0); - - while (!ret && !bch_keylist_empty(keys)) { - op->op.lock = 0; - ret = bch_btree_map_leaf_nodes(&op->op, op->c, - &START_KEY(keys->keys), - btree_insert_fn); - } - - if (ret == -EAGAIN) - continue_at(cl, bch_data_insert_keys, op->c->btree_insert_wq); - if (op->op.insert_collision) op->replace_collision = true; @@ -126,6 +105,42 @@ static void bch_data_insert_keys(struct closure *cl) closure_return(cl); } +static void __bch_data_insert_keys(struct closure *cl) +{ + struct data_insert_op *op = container_of(cl, struct data_insert_op, + op.cl); + struct keylist *keys = &op->insert_keys; + int ret = 0; + + while (!ret && !bch_keylist_empty(keys)) { + op->op.lock = 0; + ret = bch_btree_map_leaf_nodes(&op->op, op->c, + &START_KEY(keys->keys), + btree_insert_fn); + } + + if (ret == -EAGAIN) + continue_at(cl, __bch_data_insert_keys, op->c->btree_insert_wq); + + closure_return(cl); +} + +/** + * bch_data_insert_keys - insert extent btree keys for a write + */ +static void bch_data_insert_keys(struct closure *cl) +{ + struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + enum btree_id id = BTREE_ID_EXTENTS; + enum alloc_reserve reserve; + + reserve = bch_btree_reserve(op); + __bch_btree_op_init(&op->op, id, reserve, 0); + + closure_call(&op->op.cl, __bch_data_insert_keys, NULL, cl); + continue_at(cl, bch_data_insert_keys_done, op->c->btree_insert_wq); +} + /** * bch_data_invalidate - discard range of keys * @@ -899,11 +914,12 @@ static int 
cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) */ static void cache_lookup(struct closure *cl) { - struct search *s = container_of(cl, struct search, iop.cl); + struct search *s = container_of(cl, struct search, op.cl); + enum btree_id id = BTREE_ID_EXTENTS; struct bio *bio = &s->bio.bio; int ret; - bch_btree_op_init(&s->op, BTREE_ID_EXTENTS, -1); + __bch_btree_op_init(&s->op, id, id, -1); ret = bch_btree_map_keys(&s->op, s->iop.c, &KEY(s->inode, bio->bi_iter.bi_sector, 0), @@ -1094,7 +1110,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, replace.key = KEY(s->inode, bio->bi_iter.bi_sector + sectors, sectors); - ret = bch_btree_insert_check_key(b, &s->op, &replace.key, &s->cl); + ret = bch_btree_insert_check_key(b, &s->op, &replace.key); if (ret) return ret; @@ -1115,7 +1131,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s) { struct closure *cl = &s->cl; - closure_call(&s->iop.cl, cache_lookup, NULL, cl); + closure_call(&s->op.cl, cache_lookup, NULL, cl); continue_at(cl, cached_dev_read_done_bh, NULL); } @@ -1388,7 +1404,7 @@ static void __flash_dev_make_request(struct request_queue *q, struct bio *bio) closure_call(&s->iop.cl, bch_data_insert, NULL, cl); } else { - closure_call(&s->iop.cl, cache_lookup, NULL, cl); + closure_call(&s->op.cl, cache_lookup, NULL, cl); } continue_at(cl, search_free, NULL); diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index a322ba15eba7..c97cb0477d7e 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -14,6 +14,7 @@ struct data_insert_op { struct workqueue_struct *wq; struct bio *bio; + /* Used internally, do not touch */ struct btree_op op; uint16_t write_point; |