author    Kent Overstreet <kent.overstreet@gmail.com>    2016-01-25 14:36:01 -0900
committer Kent Overstreet <kent.overstreet@gmail.com>    2016-10-07 12:35:24 -0800
commit    a0691c1298f269e6543158c724b45b8ae05d6b52 (patch)
tree      4c44604bfe5054c873905eafb3dd7f963270b92e
parent    452d72a464abaf249448ed43eba9b1d6507c964a (diff)
bcache: more insert path refactoring
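btree_insert_key() no longer returns a bool: each case now dequeues the key
from @insert_keys itself once it has been fully processed, with the extent
path dequeuing only when nothing of the original key remains after
bch_cut_front(). bch_insert_fixup_key() and bch_insert_fixup_extent() become
void, bkey_cmpxchg() and handle_existing_key_newer() lose their *inserted
out-parameter, bch_btree_insert_keys_leaf() now calls
bch_btree_node_write_lazy() unconditionally, and the insert_done field is
dropped from the bcache_btree_insert_key tracepoint.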
-rw-r--r--  drivers/md/bcache/btree_io.c      10
-rw-r--r--  drivers/md/bcache/btree_update.c  47
-rw-r--r--  drivers/md/bcache/extents.c       28
-rw-r--r--  drivers/md/bcache/extents.h        4
-rw-r--r--  include/trace/events/bcache.h     11
5 files changed, 39 insertions, 61 deletions
diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c
index 6d08f8e1b7df..ea8f2f967bc7 100644
--- a/drivers/md/bcache/btree_io.c
+++ b/drivers/md/bcache/btree_io.c
@@ -130,7 +130,15 @@ sort:
return true;
}
-/* Returns true if we sorted (i.e. invalidated iterators */
+/*
+ * bch_btree_init_next - initialize a new (unwritten) bset that can then be
+ * inserted into
+ *
+ * Safe to call if there already is an unwritten bset - will only add a new bset
+ * if @b doesn't already have one.
+ *
+ * May sort the node first, which invalidates outstanding iterators.
+ */
void bch_btree_init_next(struct cache_set *c, struct btree *b,
struct btree_iter *iter)
{
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index 27eddd8026d9..9c81b9d7516d 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -735,18 +735,16 @@ void bch_btree_insert_and_journal(struct btree_iter *iter,
- * Returns true if an insert was actually done and @b was modified - false on a
- * failed replace operation
+ * Takes the front key from @insert_keys and inserts it into @b, dequeuing it
+ * once it has been fully processed
 */
-static bool btree_insert_key(struct btree_iter *iter, struct btree *b,
+static void btree_insert_key(struct btree_iter *iter, struct btree *b,
struct btree_node_iter *node_iter,
struct keylist *insert_keys,
struct bch_replace_info *replace,
struct journal_res *res,
unsigned flags)
{
- bool dequeue = false;
struct bkey_i *insert = bch_keylist_front(insert_keys), *orig = insert;
BKEY_PADDED(key) temp;
s64 oldsize = bch_count_data(&b->keys);
- bool do_insert;
BUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
bch_btree_node_iter_verify(node_iter, &b->keys);
@@ -755,15 +753,14 @@ static bool btree_insert_key(struct btree_iter *iter, struct btree *b,
BUG_ON(res->ref);
BUG_ON(replace);
- do_insert = bch_insert_fixup_btree_ptr(iter, b, insert,
- node_iter);
- dequeue = true;
+ bch_insert_fixup_btree_ptr(iter, b, insert, node_iter);
+ bch_keylist_dequeue(insert_keys);
} else if (!b->keys.ops->is_extents) {
BUG_ON(iter->nodes[0] != b ||
&iter->node_iters[0] != node_iter);
- do_insert = bch_insert_fixup_key(iter, insert, replace, res);
- dequeue = true;
+ bch_insert_fixup_key(iter, insert, replace, res);
+ bch_keylist_dequeue(insert_keys);
} else {
BUG_ON(iter->nodes[0] != b ||
&iter->node_iters[0] != node_iter);
@@ -774,20 +771,16 @@ static bool btree_insert_key(struct btree_iter *iter, struct btree *b,
if (bkey_cmp(insert->k.p, b->key.k.p) > 0)
bch_cut_back(b->key.k.p, &insert->k);
- do_insert = bch_insert_fixup_extent(iter, insert, replace,
- res, flags);
+ bch_insert_fixup_extent(iter, insert, replace, res, flags);
+
bch_cut_front(iter->pos, orig);
- dequeue = orig->k.size == 0;
+ if (orig->k.size == 0)
+ bch_keylist_dequeue(insert_keys);
}
- if (dequeue)
- bch_keylist_dequeue(insert_keys);
-
bch_count_data_verify(&b->keys, oldsize);
- trace_bcache_btree_insert_key(b, insert, replace != NULL, do_insert);
-
- return do_insert;
+ trace_bcache_btree_insert_key(b, insert, replace != NULL);
}
enum btree_insert_status {
@@ -1150,7 +1143,7 @@ bch_btree_insert_keys_leaf(struct btree *b,
u64 *journal_seq,
unsigned flags)
{
- bool done = false, inserted = false, need_split = false;
+ bool done = false, need_split = false;
struct journal_res res = { 0, 0 };
struct bkey_i *k = bch_keylist_front(insert_keys);
@@ -1202,11 +1195,8 @@ bch_btree_insert_keys_leaf(struct btree *b,
if (journal_res_full(&res, &k->k))
break;
- if (btree_insert_key(iter, b,
- &iter->node_iters[b->level],
- insert_keys, replace,
- &res, flags))
- inserted = true;
+ btree_insert_key(iter, b, &iter->node_iters[b->level],
+ insert_keys, replace, &res, flags);
}
btree_node_unlock_write(b, iter);
@@ -1216,8 +1206,7 @@ bch_btree_insert_keys_leaf(struct btree *b,
journal_seq);
}
- if (inserted)
- bch_btree_node_write_lazy(b, iter);
+ bch_btree_node_write_lazy(b, iter);
return need_split ? BTREE_INSERT_NEED_SPLIT : BTREE_INSERT_OK;
}
@@ -1789,10 +1778,10 @@ retry:
}
for (i = m; i < m + nr; i++)
- BUG_ON(!btree_insert_key(i->iter, i->iter->nodes[0],
- &i->iter->node_iters[0],
- &keylist_single(i->k), NULL,
- &res, flags));
+ btree_insert_key(i->iter, i->iter->nodes[0],
+ &i->iter->node_iters[0],
+ &keylist_single(i->k), NULL,
+ &res, flags);
do {
multi_unlock_write(m, --i);
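To make the new convention concrete, here is a minimal self-contained sketch
(hypothetical stand-in types and helpers, not the kernel code): the insert
helper returns void and dequeues the front key itself once it is fully
consumed, so a caller like bch_btree_insert_keys_leaf() simply drains the
keylist instead of branching on a bool.

#include <stdbool.h>
#include <stdio.h>

struct key { unsigned size; };

struct keylist {
	struct key keys[8];
	unsigned nr;
};

static bool keylist_empty(const struct keylist *l) { return l->nr == 0; }
static struct key *keylist_front(struct keylist *l) { return &l->keys[0]; }

static void keylist_dequeue(struct keylist *l)
{
	unsigned i;

	for (i = 1; i < l->nr; i++)
		l->keys[i - 1] = l->keys[i];
	l->nr--;
}

/* new style: no return value; the dequeue happens here, not in the caller */
static void insert_key(struct keylist *insert_keys)
{
	struct key *k = keylist_front(insert_keys);

	/* pretend we inserted up to 4 "sectors" of the key */
	k->size = k->size > 4 ? k->size - 4 : 0;

	/* extents: only dequeue once nothing of the key remains */
	if (k->size == 0)
		keylist_dequeue(insert_keys);
}

int main(void)
{
	struct keylist l = { .keys = { { 8 }, { 4 } }, .nr = 2 };

	/* the caller just drains the list; there is no bool to check */
	while (!keylist_empty(&l))
		insert_key(&l);

	printf("all keys inserted\n");
	return 0;
}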
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 5b3cd82d2a99..ff50f66a6a0e 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -105,7 +105,7 @@ struct btree_nr_keys bch_key_sort_fix_overlapping(struct btree_keys *b,
-/* This returns true if insert should be inserted, false otherwise */
-bool bch_insert_fixup_key(struct btree_iter *iter,
+void bch_insert_fixup_key(struct btree_iter *iter,
struct bkey_i *insert,
struct bch_replace_info *replace,
struct journal_res *res)
@@ -134,7 +134,6 @@ bool bch_insert_fixup_key(struct btree_iter *iter,
}
bch_btree_insert_and_journal(iter, insert, res);
- return true;
}
/* Common among btree and extent ptrs */
@@ -942,7 +941,6 @@ static bool bkey_cmpxchg(struct btree_iter *iter,
struct bkey_s_c k,
struct bch_replace_info *replace,
struct bkey_i *new,
- bool *inserted,
struct journal_res *res)
{
struct bkey_i *old = &replace->key;
@@ -977,7 +975,6 @@ static bool bkey_cmpxchg(struct btree_iter *iter,
*/
bch_btree_insert_and_journal(iter,
bch_key_split(iter->pos, new), res);
- *inserted = true;
}
bch_cut_subtract_front(iter, bkey_start_pos(k.k),
@@ -1004,7 +1001,6 @@ static bool bkey_cmpxchg(struct btree_iter *iter,
*/
bch_btree_insert_and_journal(iter,
bch_key_split(iter->pos, new), res);
- *inserted = true;
}
/* update @new to be the part we haven't checked yet */
@@ -1025,7 +1021,6 @@ static bool bkey_cmpxchg(struct btree_iter *iter,
static void handle_existing_key_newer(struct btree_iter *iter,
struct bkey_i *insert,
const struct bkey *k,
- bool *inserted,
struct journal_res *res)
{
struct bkey_i *split;
@@ -1062,7 +1057,6 @@ static void handle_existing_key_newer(struct btree_iter *iter,
-split = bch_key_split(bkey_start_pos(k), insert),
+split = bch_key_split(bkey_start_pos(k), insert);
bch_cut_subtract_front(iter, k->p, bkey_i_to_s(insert));
bch_btree_insert_and_journal(iter, split, res);
- *inserted = true;
break;
case BCH_EXTENT_OVERLAP_ALL:
@@ -1109,15 +1103,11 @@ static void handle_existing_key_newer(struct btree_iter *iter,
* there may be another 0 size key between them in another bset, and it will
* thus overlap with the merged key.
*
- * This returns true if it inserted, false otherwise.
- * Note that it can return false due to failure or because there is no
- * room for the insertion -- the caller needs to split the btree node.
- *
- * In addition, the end of iter->pos indicates how much has been processed.
- * If the end of iter->pos is not the same as the end of insert, then
- * key insertion needs to continue/be retried.
+ * In addition, iter->pos indicates how much of @insert has been processed:
+ * if iter->pos has not yet reached the end of @insert, then key insertion
+ * needs to continue/be retried.
*/
-bool bch_insert_fixup_extent(struct btree_iter *iter,
+void bch_insert_fixup_extent(struct btree_iter *iter,
struct bkey_i *insert,
struct bch_replace_info *replace,
struct journal_res *res,
@@ -1132,7 +1122,6 @@ bool bch_insert_fixup_extent(struct btree_iter *iter,
struct bkey_tup tup;
struct bkey_s k;
BKEY_PADDED(k) split;
- bool inserted = false;
unsigned nr_done = 0;
u64 start_time = local_clock();
@@ -1173,7 +1162,7 @@ bool bch_insert_fixup_extent(struct btree_iter *iter,
insert->k.size = 0;
if (replace != NULL)
replace->failures += 1;
- return false;
+ return;
}
while (insert->k.size &&
@@ -1231,13 +1220,12 @@ bool bch_insert_fixup_extent(struct btree_iter *iter,
? k.k->p : insert->k.p);
else if (k.k->size &&
!bkey_cmpxchg(iter, k.s_c, replace,
- insert, &inserted, res))
+ insert, res))
continue;
if (k.k->size && insert->k.version &&
insert->k.version < k.k->version) {
- handle_existing_key_newer(iter, insert, k.k,
- &inserted, res);
+ handle_existing_key_newer(iter, insert, k.k, res);
continue;
}
@@ -1320,12 +1308,8 @@ bool bch_insert_fixup_extent(struct btree_iter *iter,
bch_btree_iter_set_pos(iter, insert_end);
}
out:
- if (insert->k.size) {
+ if (insert->k.size)
bch_btree_insert_and_journal(iter, insert, res);
- inserted = true;
- }
-
- return inserted;
}
static const char *bch_extent_invalid(const struct cache_set *c,
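The comment block retained above describes the convention that
bch_insert_fixup_extent() now relies on exclusively: with the bool return
gone, iter->pos is the only record of how far the insert got. A stripped-down,
self-contained model of that convention follows; struct iter, struct insert
and insert_fixup_extent() here are hypothetical stand-ins, not the kernel
types.

#include <stdio.h>

struct iter   { unsigned long long pos; };
struct insert { unsigned long long end; };

/* pretend fixup pass: makes at most 4 "sectors" of progress per call */
static void insert_fixup_extent(struct iter *it, const struct insert *k)
{
	it->pos = it->pos + 4 < k->end ? it->pos + 4 : k->end;
}

int main(void)
{
	struct iter it = { .pos = 0 };
	struct insert k = { .end = 10 };

	/* continue/retry until iter->pos reaches the end of the insert */
	while (it.pos < k.end)
		insert_fixup_extent(&it, &k);

	printf("processed to %llu\n", it.pos);
	return 0;
}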
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
index dd6b12dfd3d4..5d1d55c4189c 100644
--- a/drivers/md/bcache/extents.h
+++ b/drivers/md/bcache/extents.h
@@ -16,7 +16,7 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct btree_keys *,
struct bset *,
struct btree_node_iter *);
-bool bch_insert_fixup_key(struct btree_iter *, struct bkey_i *,
+void bch_insert_fixup_key(struct btree_iter *, struct bkey_i *,
struct bch_replace_info *, struct journal_res *);
extern const struct bkey_ops bch_bkey_btree_ops;
@@ -47,7 +47,7 @@ bch_extent_pick_ptr(struct cache_set *c, struct bkey_s_c k,
bch_extent_pick_ptr_avoiding(c, k, NULL, ret);
}
-bool bch_insert_fixup_extent(struct btree_iter *, struct bkey_i *,
+void bch_insert_fixup_extent(struct btree_iter *, struct bkey_i *,
struct bch_replace_info *,
struct journal_res *, unsigned);
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 0049cd9b7d51..fc417f35af67 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -619,9 +619,8 @@ DEFINE_EVENT(btree_node_op, bcache_btree_intent_lock_fail,
);
TRACE_EVENT(bcache_btree_insert_key,
- TP_PROTO(struct btree *b, struct bkey_i *k, unsigned op,
- bool insert_done),
- TP_ARGS(b, k, op, insert_done),
+ TP_PROTO(struct btree *b, struct bkey_i *k, unsigned op),
+ TP_ARGS(b, k, op),
TP_STRUCT__entry(
__field(u64, b_bucket )
@@ -633,7 +632,6 @@ TRACE_EVENT(bcache_btree_insert_key,
__field(u8, level )
__field(u8, id )
__field(u8, op )
- __field(u8, insert_done )
),
TP_fast_assign(
@@ -646,11 +644,10 @@ TRACE_EVENT(bcache_btree_insert_key,
__entry->offset = k->k.p.offset;
__entry->size = k->k.size;
__entry->op = op;
- __entry->insert_done = insert_done;
),
- TP_printk("%u for %u bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
- __entry->insert_done, __entry->op,
+ TP_printk("%s at bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
+ __entry->op ? "replace" : "insert",
__entry->b_bucket, __entry->level, __entry->id,
__entry->b_inode, __entry->b_offset,
__entry->inode, __entry->offset, __entry->size)
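With the insert_done argument gone, the tracepoint reports only the kind of
operation; given the new format string, a trace line would now look something
like this (values are hypothetical):

insert at bucket 3042(0) id 1: 1:2048 1:2056 len 8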