author		Kent Overstreet <kent.overstreet@gmail.com>	2015-08-21 01:42:36 -0800
committer	Kent Overstreet <kent.overstreet@gmail.com>	2017-01-18 20:26:54 -0900
commit		e70206fae53686bfaffc54bef1b8b14c1c3d5efd
tree		e15dc6a3dd7210d2da8c2abc9398856dab5def3b
parent		0789d0c893dcd0c28f2d72010d7056be70573397
bcache: Don't use bkey_start_pos() in btree node iter comparison fn
The btree node iter comparison fn is a fast path, so we want to avoid computing bkey_start_pos() if we can - and fortunately, now we can. This will get a lot more important with packed bkeys, since then we'll have to unpack the key just to compute bkey_start_pos().

Unfortunately, when sorting extents and fixing overlapping extents (when reading a btree node in from disk) we still need to compare against bkey_start_pos(): extent_sort_fixup() needs to see extents ordered by bkey_start_pos(), and since there are overlaps there, the tricks used to avoid computing bkey_start_pos() in the normal comparison fn don't work.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--  drivers/md/bcache/bset.c     | 38
-rw-r--r--  drivers/md/bcache/bset.h     |  1
-rw-r--r--  drivers/md/bcache/extents.c  | 34
3 files changed, 34 insertions(+), 39 deletions(-)
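To make the ordering change concrete, here is a minimal, self-contained C sketch of the idea. The types and helper names (struct bpos, struct bkey, bpos_cmp(), key_deleted(), key_start_pos(), iter_cmp_old(), iter_cmp_new()) are simplified stand-ins for illustration only, not the real bcache definitions: the old comparison has to derive each key's start position, while the new one compares only the end positions already stored in the keys and breaks ties on the deleted bit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for struct bpos / struct bkey */
struct bpos { uint64_t inode, offset; };
struct bkey { struct bpos p; uint32_t size; };	/* p = end position of the extent */

int bpos_cmp(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	return 0;
}

/* Stand-in for bkey_deleted(); for extents, size == 0 is the proxy */
bool key_deleted(const struct bkey *k)
{
	return !k->size;
}

/* Old ordering: needs the start position, an extra computation
 * (and, with packed bkeys, an unpack) on every comparison */
struct bpos key_start_pos(const struct bkey *k)
{
	struct bpos ret = k->p;
	ret.offset -= k->size;
	return ret;
}

bool iter_cmp_old(const struct bkey *l, const struct bkey *r)
{
	return bpos_cmp(key_start_pos(l), key_start_pos(r)) > 0;
}

/* New ordering: compare end positions, break ties on the deleted bit -
 * deleted keys sort last for extents, first for everything else */
bool iter_cmp_new(const struct bkey *l, const struct bkey *r, bool is_extents)
{
	int c = bpos_cmp(l->p, r->p);

	return c ? c > 0
		: is_extents
		? key_deleted(l) > key_deleted(r)
		: key_deleted(l) < key_deleted(r);
}

int main(void)
{
	struct bkey live     = { .p = { 1, 128 }, .size = 16 };
	struct bkey whiteout = { .p = { 1, 128 }, .size = 0 };

	printf("extents:     whiteout after live? %d\n",
	       iter_cmp_new(&whiteout, &live, true));	/* 1: zero-size keys sort last */
	printf("non-extents: whiteout after live? %d\n",
	       iter_cmp_new(&whiteout, &live, false));	/* 0: deleted keys sort first */
	return 0;
}

With packed bkeys the start position would additionally require unpacking the key, which is exactly the cost the patch avoids in the iterator fast path.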
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index cca3c1c68c45..277467c610f6 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -16,6 +16,16 @@
#include <linux/random.h>
#include <linux/prefetch.h>
+static bool keys_out_of_order(struct bkey *prev, struct bkey *next,
+			      bool is_extents)
+{
+	return bkey_cmp(prev->p, bkey_start_pos(next)) > 0 ||
+		((is_extents
+		  ? !bkey_deleted(next)
+		  : !bkey_deleted(prev)) &&
+		 !bkey_cmp(prev->p, next->p));
+}
+
#ifdef CONFIG_BCACHE_DEBUG
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
@@ -106,15 +116,6 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
}
}
-static bool keys_out_of_order(struct bkey *prev, struct bkey *next,
-			      bool is_extents)
-{
-	return bkey_cmp(prev->p, bkey_start_pos(next)) > 0 ||
-		(!is_extents &&
-		 !bkey_deleted(prev) &&
-		 !bkey_cmp(prev->p, next->p));
-}
-
static void bch_btree_node_iter_next_check(struct btree_node_iter *iter)
{
struct btree_keys *b = iter->b;
@@ -825,7 +826,7 @@ void bch_bset_insert(struct btree_keys *b,
	BUG_ON(where > bset_bkey_last(i));

	while (where != bset_bkey_last(i) &&
-	       bkey_cmp(insert->p, bkey_start_pos(where)) > 0)
+	       keys_out_of_order(insert, where, b->ops->is_extents))
		prev = where, where = bkey_next(where);

	if (!prev)
@@ -1036,9 +1037,20 @@ static inline bool btree_node_iter_cmp(struct btree_node_iter *iter,
					struct btree_node_iter_set l,
					struct btree_node_iter_set r)
{
-	return iter->is_extents
-		? bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)) > 0
-		: bkey_cmp(l.k->p, r.k->p) > 0;
+	s64 c = bkey_cmp(l.k->p, r.k->p);
+
+	/*
+	 * For non extents, when keys compare equal the deleted keys have to
+	 * come first - so that bch_btree_node_iter_next_check() can detect
+	 * duplicate nondeleted keys (and possibly other reasons?)
+	 *
+	 * For extents, bkey_deleted() is used as a proxy for k->size == 0, so
+	 * deleted keys have to sort last.
+	 */
+	return c ? c > 0
+		: iter->is_extents
+		? bkey_deleted(l.k) > bkey_deleted(r.k)
+		: bkey_deleted(l.k) < bkey_deleted(r.k);
}
void bch_btree_node_iter_push(struct btree_node_iter *iter,
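The bch_bset_insert() hunk above switches its position scan to the same keys_out_of_order() helper, presumably so that keys comparing equal are placed consistently with the iterator's new tie-breaking. A rough analogue of that scan follows; the stand-in types repeat those of the earlier sketch so this compiles on its own, and find_insert_pos() over a plain array is a hypothetical illustration, not the real bset walk.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same simplified stand-ins as in the earlier sketch */
struct bpos { uint64_t inode, offset; };
struct bkey { struct bpos p; uint32_t size; };

static int bpos_cmp(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	return 0;
}

static struct bpos key_start_pos(const struct bkey *k)
{
	struct bpos ret = k->p;
	ret.offset -= k->size;
	return ret;
}

static bool key_deleted(const struct bkey *k)
{
	return !k->size;
}

/* Mirrors the keys_out_of_order() helper the patch hoists to the top of
 * bset.c: true if `prev` may not immediately precede `next` */
static bool keys_out_of_order(const struct bkey *prev, const struct bkey *next,
			      bool is_extents)
{
	return bpos_cmp(prev->p, key_start_pos(next)) > 0 ||
		((is_extents ? !key_deleted(next) : !key_deleted(prev)) &&
		 !bpos_cmp(prev->p, next->p));
}

/* Hypothetical analogue of the scan in bch_bset_insert(): walk forward past
 * every key the new key may not precede, and insert in front of the first
 * key it may precede */
static size_t find_insert_pos(const struct bkey *keys, size_t nr,
			      const struct bkey *insert, bool is_extents)
{
	size_t i = 0;

	while (i < nr && keys_out_of_order(insert, &keys[i], is_extents))
		i++;
	return i;
}

int main(void)
{
	struct bkey keys[] = {
		{ .p = { 1,  64 }, .size = 16 },	/* covers 48..64  */
		{ .p = { 1, 128 }, .size = 32 },	/* covers 96..128 */
	};
	struct bkey insert = { .p = { 1, 80 }, .size = 8 };	/* covers 72..80 */

	/* prints 1: the new extent slots in between the two existing ones */
	printf("%zu\n", find_insert_pos(keys, 2, &insert, true));
	return 0;
}

Note that keys_out_of_order() itself still computes the start position of `next`; only the btree node iter comparison fn drops it, which is the fast path the commit message is concerned with.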
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index bc6a45e462de..7a14b53a9ccc 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -380,7 +380,6 @@ static inline enum bch_extent_overlap bch_extent_overlap(const struct bkey *k,
/* Btree key iteration */
struct btree_node_iter {
-	/* If true, compare bkey_start_pos(k) and not k itself. */
	u8 is_extents;
	unsigned size:24;
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 887dc464552e..6af7d34cee88 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -1026,30 +1026,6 @@ static void handle_existing_key_newer(struct btree *b,
}
}
-static void overwrite_full_key(struct btree *b, struct bkey *insert,
-			       struct btree_node_iter *iter,
-			       struct bkey *k)
-{
-	if (!bkey_deleted(k))
-		b->keys.nr_live_u64s -= k->u64s;
-
-	bch_drop_subtract(b, k);
-	/*
-	 * Completely overwrote, so if this key isn't in the
-	 * same bset as the one we're going to insert into we
-	 * can just set its size to 0, and not modify the
-	 * offset, and not have to invalidate/fix the auxiliary
-	 * search tree.
-	 *
-	 * Note: peek_overlapping() will think we still overlap,
-	 * so we need the explicit iter_next() call.
-	 */
-	if (!bkey_written(&b->keys, k))
-		k->p.offset = bkey_start_offset(insert);
-
-	bch_btree_node_iter_advance(iter);
-}
-
/**
* bch_extent_insert_fixup - insert a new extent and deal with overlaps
*
@@ -1207,11 +1183,19 @@ bool bch_insert_fixup_extent(struct btree *b, struct bkey *insert,
		 * auxiliary tree.
		 */
		bch_bset_fix_invalidated_key(&b->keys, k);
+		bch_btree_node_iter_advance(iter);
		break;
	case BCH_EXTENT_OVERLAP_ALL:
		/* The insert key completely covers k, invalidate k */
-		overwrite_full_key(b, insert, iter, k);
+		if (!bkey_deleted(k))
+			b->keys.nr_live_u64s -= k->u64s;
+
+		bch_drop_subtract(b, k);
+		k->p = bkey_start_pos(insert);
+
+		bch_bset_fix_invalidated_key(&b->keys, k);
+		bch_btree_node_iter_advance(iter);
		break;
	case BCH_EXTENT_OVERLAP_MIDDLE: