author     Kent Overstreet <kent.overstreet@gmail.com>  2016-11-28 15:47:59 -0900
committer  Kent Overstreet <kent.overstreet@gmail.com>  2017-01-18 21:41:12 -0900
commit     e4c6d267652566d655d9c8a750094fcded2a31c6
tree       07d3c8776ad1903e265493188798de2c02e4543b
parent     df20556247c8d1e22544e293ee9894cba95151e6
bcache: packed bkeys refactoring
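
Pass a struct btree_keys to the packed-bkey helpers (bkey_unpack_key(),
bkey_cmp_packed(), bkey_pack_pos_lossy(), bkey_greatest_differing_bit(),
etc.) instead of a bare bkey_format. struct btree_keys now caches the
format's key bits in b->nr_key_bits, maintained by the new
btree_node_set_format() helper, so the hot comparison paths read a
cached u8 rather than recomputing bkey_format_key_bits(); EBUG_ON()
assertions check that the cache stays in sync with the format. The
memcpy_u64s()/memmove_u64s() helpers move from bkey.h to util.h, and
bkey_disassemble() and bkey_cmp_p_or_unp() move from bkey.h to bset.h,
which they now depend on for struct btree_keys.

A sketch of the calling-convention change (the prototypes are taken
from the patch below; the caller lines are illustrative, not part of
this commit):

    /* before: callers dug the format out of the node */
    int __bkey_cmp_packed(const struct bkey_format *f,
                          const struct bkey_packed *l,
                          const struct bkey_packed *r);

    cmp = bkey_cmp_packed(&b->format, l, r);    /* b: struct btree_keys */

    /* after: callers pass the btree_keys itself, which carries both
     * the format and the cached key-bit count */
    int __bkey_cmp_packed(const struct bkey_packed *l,
                          const struct bkey_packed *r,
                          const struct btree_keys *b);

    cmp = bkey_cmp_packed(b, l, r);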
-rw-r--r--  drivers/md/bcache/bkey.c          | 100
-rw-r--r--  drivers/md/bcache/bkey.h          | 158
-rw-r--r--  drivers/md/bcache/bset.c          | 112
-rw-r--r--  drivers/md/bcache/bset.h          |  58
-rw-r--r--  drivers/md/bcache/btree_gc.c      |   2
-rw-r--r--  drivers/md/bcache/btree_io.c      |  39
-rw-r--r--  drivers/md/bcache/btree_iter.c    |  36
-rw-r--r--  drivers/md/bcache/btree_update.c  |  36
-rw-r--r--  drivers/md/bcache/extents.c       |  33
-rw-r--r--  drivers/md/bcache/util.h          |  78
10 files changed, 329 insertions(+), 323 deletions(-)
diff --git a/drivers/md/bcache/bkey.c b/drivers/md/bcache/bkey.c
index 484cd4956ae2..90cc2a5a10f5 100644
--- a/drivers/md/bcache/bkey.c
+++ b/drivers/md/bcache/bkey.c
@@ -9,6 +9,9 @@
const struct bkey_format bch_bkey_format_current = BKEY_FORMAT_CURRENT;
+struct bkey __bkey_unpack_key(const struct bkey_format *,
+ const struct bkey_packed *);
+
void bch_to_binary(char *out, const u64 *p, unsigned nr_bits)
{
unsigned bit = high_bit_offset, done = 0;
@@ -44,7 +47,7 @@ static void bch_bkey_pack_verify(const struct bkey_packed *packed,
BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));
- tmp = bkey_unpack_key(format, packed);
+ tmp = __bkey_unpack_key(format, packed);
if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
char buf1[160], buf2[160];
@@ -224,8 +227,6 @@ static bool bch_bkey_transform_key(const struct bkey_format *out_f,
struct unpack_state in_s = unpack_state_init(in_f, in);
unsigned i;
- EBUG_ON(bkey_unpack_key(in_f, in).size);
-
out->_data[0] = 0;
for (i = 0; i < BKEY_NR_FIELDS; i++)
@@ -257,8 +258,8 @@ bool bch_bkey_transform(const struct bkey_format *out_f,
return true;
}
-static struct bkey __bkey_unpack_key(const struct bkey_format *format,
- const struct bkey_packed *in)
+struct bkey __bkey_unpack_key(const struct bkey_format *format,
+ const struct bkey_packed *in)
{
struct unpack_state state = unpack_state_init(format, in);
struct bkey out;
@@ -382,26 +383,25 @@ bool bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
* bkey_unpack_key -- unpack just the key, not the value
*/
__flatten
-struct bkey bkey_unpack_key(const struct bkey_format *format,
+struct bkey bkey_unpack_key(const struct btree_keys *b,
const struct bkey_packed *src)
{
return likely(bkey_packed(src))
- ? __bkey_unpack_key(format, src)
+ ? __bkey_unpack_key(&b->format, src)
: *packed_to_bkey_c(src);
}
/**
* bkey_unpack -- unpack the key and the value
*/
-void bkey_unpack(struct bkey_i *dst,
- const struct bkey_format *format,
+void bkey_unpack(const struct btree_keys *b, struct bkey_i *dst,
const struct bkey_packed *src)
{
- dst->k = bkey_unpack_key(format, src);
+ dst->k = bkey_unpack_key(b, src);
memcpy_u64s(&dst->v,
- bkeyp_val(format, src),
- bkeyp_val_u64s(format, src));
+ bkeyp_val(&b->format, src),
+ bkeyp_val_u64s(&b->format, src));
}
/**
@@ -456,20 +456,23 @@ static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v)
#ifdef CONFIG_BCACHE_DEBUG
static bool bkey_packed_successor(struct bkey_packed *out,
- const struct bkey_format *format,
+ const struct btree_keys *b,
struct bkey_packed k)
{
- unsigned nr_key_bits = bkey_format_key_bits(format);
+ const struct bkey_format *f = &b->format;
+ unsigned nr_key_bits = b->nr_key_bits;
unsigned first_bit, offset;
u64 *p;
+ EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
+
if (!nr_key_bits)
return false;
*out = k;
first_bit = high_bit_offset + nr_key_bits - 1;
- p = nth_word(high_word(format, out), first_bit >> 6);
+ p = nth_word(high_word(f, out), first_bit >> 6);
offset = 63 - (first_bit & 63);
while (nr_key_bits) {
@@ -478,7 +481,7 @@ static bool bkey_packed_successor(struct bkey_packed *out,
if ((*p & mask) != mask) {
*p += 1ULL << offset;
- EBUG_ON(__bkey_cmp_packed(format, out, &k) <= 0);
+ EBUG_ON(__bkey_cmp_packed(out, &k, b) <= 0);
return true;
}
@@ -502,9 +505,10 @@ static bool bkey_packed_successor(struct bkey_packed *out,
*/
enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
struct bpos in,
- const struct bkey_format *format)
+ const struct btree_keys *b)
{
- struct pack_state state = pack_state_init(format, out);
+ const struct bkey_format *f = &b->format;
+ struct pack_state state = pack_state_init(f, out);
#ifdef CONFIG_BCACHE_DEBUG
struct bpos orig = in;
#endif
@@ -513,7 +517,7 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
out->_data[0] = 0;
if (unlikely(in.snapshot <
- le64_to_cpu(format->field_offset[BKEY_FIELD_SNAPSHOT]))) {
+ le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
if (!in.offset-- &&
!in.inode--)
return BKEY_PACK_POS_FAIL;
@@ -522,7 +526,7 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
}
if (unlikely(in.offset <
- le64_to_cpu(format->field_offset[BKEY_FIELD_OFFSET]))) {
+ le64_to_cpu(f->field_offset[BKEY_FIELD_OFFSET]))) {
if (!in.inode--)
return BKEY_PACK_POS_FAIL;
in.offset = KEY_OFFSET_MAX;
@@ -531,7 +535,7 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
}
if (unlikely(in.inode <
- le64_to_cpu(format->field_offset[BKEY_FIELD_INODE])))
+ le64_to_cpu(f->field_offset[BKEY_FIELD_INODE])))
return BKEY_PACK_POS_FAIL;
if (!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode)) {
@@ -549,19 +553,19 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
exact = false;
pack_state_finish(&state, out);
- out->u64s = format->key_u64s;
+ out->u64s = f->key_u64s;
out->format = KEY_FORMAT_LOCAL_BTREE;
out->type = KEY_TYPE_DELETED;
#ifdef CONFIG_BCACHE_DEBUG
if (exact) {
- BUG_ON(bkey_cmp_left_packed(format, out, orig));
+ BUG_ON(bkey_cmp_left_packed(b, out, orig));
} else {
struct bkey_packed successor;
- BUG_ON(bkey_cmp_left_packed(format, out, orig) >= 0);
- BUG_ON(bkey_packed_successor(&successor, format, *out) &&
- bkey_cmp_left_packed(format, &successor, orig) < 0);
+ BUG_ON(bkey_cmp_left_packed(b, out, orig) >= 0);
+ BUG_ON(bkey_packed_successor(&successor, b, *out) &&
+ bkey_cmp_left_packed(b, &successor, orig) < 0);
}
#endif
@@ -674,16 +678,18 @@ const char *bch_bkey_format_validate(struct bkey_format *f)
* Most significant differing bit
* Bits are indexed from 0 - return is [0, nr_key_bits)
*/
-unsigned bkey_greatest_differing_bit(const struct bkey_format *format,
+unsigned bkey_greatest_differing_bit(const struct btree_keys *b,
const struct bkey_packed *l_k,
const struct bkey_packed *r_k)
{
- const u64 *l = high_word(format, l_k);
- const u64 *r = high_word(format, r_k);
- unsigned nr_key_bits = bkey_format_key_bits(format);
+ const u64 *l = high_word(&b->format, l_k);
+ const u64 *r = high_word(&b->format, r_k);
+ unsigned nr_key_bits = b->nr_key_bits;
unsigned word_bits = 64 - high_bit_offset;
u64 l_v, r_v;
+ EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
+
/* for big endian, skip past header */
l_v = *l & (~0ULL >> high_bit_offset);
r_v = *r & (~0ULL >> high_bit_offset);
@@ -715,13 +721,15 @@ unsigned bkey_greatest_differing_bit(const struct bkey_format *format,
* First set bit
* Bits are indexed from 0 - return is [0, nr_key_bits)
*/
-unsigned bkey_ffs(const struct bkey_format *format,
+unsigned bkey_ffs(const struct btree_keys *b,
const struct bkey_packed *k)
{
- const u64 *p = high_word(format, k);
- unsigned nr_key_bits = bkey_format_key_bits(format);
+ const u64 *p = high_word(&b->format, k);
+ unsigned nr_key_bits = b->nr_key_bits;
unsigned ret = 0, offset;
+ EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
+
offset = nr_key_bits;
while (offset > 64) {
p = next_word(p);
@@ -750,7 +758,8 @@ unsigned bkey_ffs(const struct bkey_format *format,
}
#ifdef CONFIG_X86_64
-static int __bkey_cmp_bits(const u64 *l, const u64 *r, unsigned nr_key_bits)
+static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
+ unsigned nr_key_bits)
{
long d0, d1, d2, d3;
int cmp;
@@ -793,7 +802,8 @@ static int __bkey_cmp_bits(const u64 *l, const u64 *r, unsigned nr_key_bits)
return cmp;
}
#else
-static int __bkey_cmp_bits(const u64 *l, const u64 *r, unsigned nr_key_bits)
+static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
+ unsigned nr_key_bits)
{
u64 l_v, r_v;
@@ -866,28 +876,32 @@ int bkey_cmp(const struct bkey *l, const struct bkey *r)
}
#endif
-int __bkey_cmp_packed(const struct bkey_format *f,
- const struct bkey_packed *l,
- const struct bkey_packed *r)
+int __bkey_cmp_packed(const struct bkey_packed *l,
+ const struct bkey_packed *r,
+ const struct btree_keys *b)
{
+ const struct bkey_format *f = &b->format;
int ret;
EBUG_ON(!bkey_packed(l) || !bkey_packed(r));
+ EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
ret = __bkey_cmp_bits(high_word(f, l),
high_word(f, r),
- bkey_format_key_bits(f));
+ b->nr_key_bits);
- EBUG_ON(ret != bkey_cmp(bkey_unpack_key(f, l).p,
- bkey_unpack_key(f, r).p));
+ EBUG_ON(ret != bkey_cmp(bkey_unpack_key(b, l).p,
+ bkey_unpack_key(b, r).p));
return ret;
}
__flatten
-int __bkey_cmp_left_packed(const struct bkey_format *format,
+int __bkey_cmp_left_packed(const struct btree_keys *b,
const struct bkey_packed *l, struct bpos r)
{
- return bkey_cmp(__bkey_unpack_pos(format, l), r);
+ const struct bkey_format *f = &b->format;
+
+ return bkey_cmp(__bkey_unpack_pos(f, l), r);
}
void bch_bpos_swab(struct bpos *p)
diff --git a/drivers/md/bcache/bkey.h b/drivers/md/bcache/bkey.h
index acfdd813203b..a7894f2115e9 100644
--- a/drivers/md/bcache/bkey.h
+++ b/drivers/md/bcache/bkey.h
@@ -76,84 +76,6 @@ static inline void set_bkey_deleted(struct bkey *k)
#define bkey_whiteout(_k) \
((_k)->type == KEY_TYPE_DELETED || (_k)->type == KEY_TYPE_DISCARD)
-static inline void __memcpy_u64s(void *dst, const void *src,
- unsigned u64s)
-{
-#ifdef CONFIG_X86_64
- long d0, d1, d2;
- asm volatile("rep ; movsq"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (u64s), "1" (dst), "2" (src)
- : "memory");
-#else
- u64 *d = dst;
- const u64 *s = src;
-
- while (u64s--)
- *d++ = *s++;
-#endif
-}
-
-static inline void memcpy_u64s(void *dst, const void *src,
- unsigned u64s)
-{
- EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
- dst + u64s * sizeof(u64) <= src));
-
- __memcpy_u64s(dst, src, u64s);
-}
-
-static inline void __memmove_u64s_down(void *dst, const void *src,
- unsigned u64s)
-{
- __memcpy_u64s(dst, src, u64s);
-}
-
-static inline void memmove_u64s_down(void *dst, const void *src,
- unsigned u64s)
-{
- EBUG_ON(dst > src);
-
- __memmove_u64s_down(dst, src, u64s);
-}
-
-static inline void __memmove_u64s_up(void *_dst, const void *_src,
- unsigned u64s)
-{
- u64 *dst = (u64 *) _dst + u64s - 1;
- u64 *src = (u64 *) _src + u64s - 1;
-
-#ifdef CONFIG_X86_64
- long d0, d1, d2;
- asm volatile("std ;\n"
- "rep ; movsq\n"
- "cld ;\n"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (u64s), "1" (dst), "2" (src)
- : "memory");
-#else
- while (u64s--)
- *dst-- = *src--;
-#endif
-}
-
-static inline void memmove_u64s_up(void *dst, const void *src,
- unsigned u64s)
-{
- EBUG_ON(dst < src);
-
- __memmove_u64s_up(dst, src, u64s);
-}
-
-static inline void memmove_u64s(void *dst, const void *src,
- unsigned u64s)
-{
- if (dst < src)
- __memmove_u64s_down(dst, src, u64s);
- else
- __memmove_u64s_up(dst, src, u64s);
-}
-
#define bkey_copy(_dst, _src) \
do { \
BUILD_BUG_ON(!type_is(_dst, struct bkey_i *) && \
@@ -181,12 +103,12 @@ void bch_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
struct bkey_format bch_bkey_format_done(struct bkey_format_state *);
const char *bch_bkey_format_validate(struct bkey_format *);
-unsigned bkey_greatest_differing_bit(const struct bkey_format *,
+unsigned bkey_greatest_differing_bit(const struct btree_keys *,
const struct bkey_packed *,
const struct bkey_packed *);
-unsigned bkey_ffs(const struct bkey_format *, const struct bkey_packed *);
+unsigned bkey_ffs(const struct btree_keys *, const struct bkey_packed *);
-int __bkey_cmp_left_packed(const struct bkey_format *,
+int __bkey_cmp_left_packed(const struct btree_keys *,
const struct bkey_packed *,
struct bpos);
@@ -199,9 +121,9 @@ int __bkey_cmp_left_packed(const struct bkey_format *,
: __bkey_cmp_left_packed(_format, _l, _r); \
})
-int __bkey_cmp_packed(const struct bkey_format *,
+int __bkey_cmp_packed(const struct bkey_packed *,
const struct bkey_packed *,
- const struct bkey_packed *);
+ const struct btree_keys *);
#if 1
static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
@@ -271,38 +193,19 @@ static inline unsigned bkey_format_key_bits(const struct bkey_format *format)
* If @_l and @_r are in the same format, does the comparison without unpacking.
* Otherwise, unpacks whichever one is packed.
*/
-#define bkey_cmp_packed(_f, _l, _r) \
+#define bkey_cmp_packed(_b, _l, _r) \
((bkey_packed_typecheck(_l) && bkey_packed_typecheck(_r)) \
- ? __bkey_cmp_packed(_f, (void *) _l, (void *) _r) \
+ ? __bkey_cmp_packed((void *) (_l), (void *) (_r), (_b)) \
: bkey_packed_typecheck(_l) \
- ? __bkey_cmp_left_packed(_f, \
- (struct bkey_packed *) _l, \
- ((struct bkey *) _r)->p) \
+ ? __bkey_cmp_left_packed(_b, \
+ (struct bkey_packed *) (_l), \
+ ((struct bkey *) (_r))->p) \
: bkey_packed_typecheck(_r) \
- ? -__bkey_cmp_left_packed(_f, \
- (struct bkey_packed *) _r, \
- ((struct bkey *) _l)->p) \
- : bkey_cmp(((struct bkey *) _l)->p, \
- ((struct bkey *) _r)->p))
-
-/* packed or unpacked */
-static inline int bkey_cmp_p_or_unp(const struct bkey_format *format,
- const struct bkey_packed *l,
- const struct bkey_packed *r_packed,
- struct bpos r)
-{
- const struct bkey *l_unpacked;
-
- EBUG_ON(r_packed && !bkey_packed(r_packed));
-
- if (unlikely(l_unpacked = packed_to_bkey_c(l)))
- return bkey_cmp(l_unpacked->p, r);
-
- if (likely(r_packed))
- return __bkey_cmp_packed(format, l, r_packed);
-
- return __bkey_cmp_left_packed(format, l, r);
-}
+ ? -__bkey_cmp_left_packed((_b), \
+ (struct bkey_packed *) (_r), \
+ ((struct bkey *) (_l))->p) \
+ : bkey_cmp(((struct bkey *) (_l))->p, \
+ ((struct bkey *) (_r))->p))
static inline struct bpos bkey_successor(struct bpos p)
{
@@ -373,7 +276,7 @@ bool bch_bkey_transform(const struct bkey_format *,
const struct bkey_format *,
const struct bkey_packed *);
-struct bkey bkey_unpack_key(const struct bkey_format *,
+struct bkey bkey_unpack_key(const struct btree_keys *,
const struct bkey_packed *);
bool bkey_pack_key(struct bkey_packed *, const struct bkey *,
const struct bkey_format *);
@@ -385,15 +288,15 @@ enum bkey_pack_pos_ret {
};
enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
- const struct bkey_format *);
+ const struct btree_keys *);
static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
- const struct bkey_format *format)
+ const struct btree_keys *b)
{
- return bkey_pack_pos_lossy(out, in, format) == BKEY_PACK_POS_EXACT;
+ return bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
}
-void bkey_unpack(struct bkey_i *, const struct bkey_format *,
+void bkey_unpack(const struct btree_keys *, struct bkey_i *,
const struct bkey_packed *);
bool bkey_pack(struct bkey_packed *, const struct bkey_i *,
const struct bkey_format *);
@@ -406,27 +309,6 @@ static inline u64 bkey_field_max(const struct bkey_format *f,
: U64_MAX;
}
-/* Disassembled bkeys */
-
-static inline struct bkey_s_c bkey_disassemble(const struct bkey_format *f,
- const struct bkey_packed *k,
- struct bkey *u)
-{
- *u = bkey_unpack_key(f, k);
-
- return (struct bkey_s_c) { u, bkeyp_val(f, k), };
-}
-
-/* non const version: */
-static inline struct bkey_s __bkey_disassemble(const struct bkey_format *f,
- struct bkey_packed *k,
- struct bkey *u)
-{
- *u = bkey_unpack_key(f, k);
-
- return (struct bkey_s) { .k = u, .v = bkeyp_val(f, k), };
-}
-
static inline void bkey_reassemble(struct bkey_i *dst,
struct bkey_s_c src)
{
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index e0ab5479adca..e65d754108ab 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -50,7 +50,6 @@ struct bset_tree *bch_bkey_to_bset(struct btree_keys *b, struct bkey_packed *k)
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
{
- struct bkey_format *f = &b->format;
struct bkey_packed *_k, *_n;
struct bkey k, n;
char buf[120];
@@ -58,7 +57,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
if (!i->u64s)
return;
- for (_k = i->start, k = bkey_unpack_key(f, _k);
+ for (_k = i->start, k = bkey_unpack_key(b, _k);
_k < bset_bkey_last(i);
_k = _n, k = n) {
_n = bkey_next(_k);
@@ -70,7 +69,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
if (_n == bset_bkey_last(i))
continue;
- n = bkey_unpack_key(f, _n);
+ n = bkey_unpack_key(b, _n);
if (bkey_cmp(bkey_start_pos(&n), k.p) < 0) {
printk(KERN_ERR "Key skipped backwards\n");
@@ -110,7 +109,7 @@ void bch_dump_btree_node_iter(struct btree_keys *b,
btree_node_iter_for_each(iter, set) {
struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
struct bset_tree *t = bch_bkey_to_bset(b, k);
- struct bkey uk = bkey_unpack_key(&b->format, k);
+ struct bkey uk = bkey_unpack_key(b, k);
char buf[100];
bch_bkey_to_text(buf, sizeof(buf), &uk);
@@ -121,18 +120,18 @@ void bch_dump_btree_node_iter(struct btree_keys *b,
#ifdef CONFIG_BCACHE_DEBUG
-static bool keys_out_of_order(const struct bkey_format *f,
+static bool keys_out_of_order(struct btree_keys *b,
const struct bkey_packed *prev,
const struct bkey_packed *next,
bool is_extents)
{
- struct bkey nextu = bkey_unpack_key(f, next);
+ struct bkey nextu = bkey_unpack_key(b, next);
- return bkey_cmp_left_packed(f, prev, bkey_start_pos(&nextu)) > 0 ||
+ return bkey_cmp_left_packed(b, prev, bkey_start_pos(&nextu)) > 0 ||
((is_extents
? !bkey_deleted(next)
: !bkey_deleted(prev)) &&
- !bkey_cmp_packed(f, prev, next));
+ !bkey_cmp_packed(b, prev, next));
}
void __bch_verify_btree_nr_keys(struct btree_keys *b)
@@ -155,15 +154,14 @@ static void bch_btree_node_iter_next_check(struct btree_node_iter *iter,
struct btree_keys *b,
struct bkey_packed *k)
{
- const struct bkey_format *f = &b->format;
const struct bkey_packed *n = bch_btree_node_iter_peek_all(iter, b);
- bkey_unpack_key(f, k);
+ bkey_unpack_key(b, k);
if (n &&
- keys_out_of_order(f, k, n, iter->is_extents)) {
- struct bkey ku = bkey_unpack_key(f, k);
- struct bkey nu = bkey_unpack_key(f, n);
+ keys_out_of_order(b, k, n, iter->is_extents)) {
+ struct bkey ku = bkey_unpack_key(b, k);
+ struct bkey nu = bkey_unpack_key(b, n);
char buf1[80], buf2[80];
bch_dump_btree_node(b);
@@ -210,18 +208,17 @@ void bch_verify_key_order(struct btree_keys *b,
struct btree_node_iter *iter,
struct bkey_packed *where)
{
- const struct bkey_format *f = &b->format;
struct bset_tree *t = bch_bkey_to_bset(b, where);
struct bkey_packed *k, *prev;
- struct bkey uk, uw = bkey_unpack_key(f, where);
+ struct bkey uk, uw = bkey_unpack_key(b, where);
k = bkey_prev_all(t, where);
if (k &&
- keys_out_of_order(f, k, where, iter->is_extents)) {
+ keys_out_of_order(b, k, where, iter->is_extents)) {
char buf1[100], buf2[100];
bch_dump_btree_node(b);
- uk = bkey_unpack_key(f, k);
+ uk = bkey_unpack_key(b, k);
bch_bkey_to_text(buf1, sizeof(buf1), &uk);
bch_bkey_to_text(buf2, sizeof(buf2), &uw);
panic("out of order with prev:\n%s\n%s\n",
@@ -230,7 +227,7 @@ void bch_verify_key_order(struct btree_keys *b,
k = bkey_next(where);
BUG_ON(k != bset_bkey_last(t->data) &&
- keys_out_of_order(f, where, k, iter->is_extents));
+ keys_out_of_order(b, where, k, iter->is_extents));
for_each_bset(b, t) {
if (!t->data->u64s)
@@ -245,14 +242,14 @@ void bch_verify_key_order(struct btree_keys *b,
if (k == bset_bkey_last(t->data))
k = bkey_prev_all(t, k);
- while (bkey_cmp_left_packed(f, k, bkey_start_pos(&uw)) > 0 &&
+ while (bkey_cmp_left_packed(b, k, bkey_start_pos(&uw)) > 0 &&
(prev = bkey_prev_all(t, k)))
k = prev;
for (;
k != bset_bkey_last(t->data);
k = bkey_next(k)) {
- uk = bkey_unpack_key(f, k);
+ uk = bkey_unpack_key(b, k);
if (iter->is_extents) {
BUG_ON(!(bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0 ||
@@ -633,7 +630,7 @@ static inline unsigned bfloat_mantissa(const struct bkey_packed *k,
return v & BKEY_MANTISSA_MASK;
}
-static void make_bfloat(struct bkey_format *format,
+static void make_bfloat(const struct btree_keys *b,
struct bset_tree *t, unsigned j)
{
struct bkey_float *f = &t->tree[j];
@@ -674,7 +671,7 @@ static void make_bfloat(struct bkey_format *format,
* Note that this may be negative - we may be running off the low end
* of the key: we handle this later:
*/
- exponent = (int) bkey_greatest_differing_bit(format, l, r) -
+ exponent = (int) bkey_greatest_differing_bit(b, l, r) -
(BKEY_MANTISSA_BITS - 1);
/*
@@ -682,14 +679,12 @@ static void make_bfloat(struct bkey_format *format,
* (k->_data), to get the key bits starting at exponent:
*/
#ifdef __LITTLE_ENDIAN
- shift = (int) (format->key_u64s * 64 -
- bkey_format_key_bits(format)) +
- exponent;
+ shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
- EBUG_ON(shift + BKEY_MANTISSA_BITS > format->key_u64s * 64);
+ EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
#else
shift = high_bit_offset +
- bkey_format_key_bits(format) -
+ b->nr_key_bits -
exponent -
BKEY_MANTISSA_BITS;
@@ -715,7 +710,7 @@ static void make_bfloat(struct bkey_format *format,
*/
if (exponent > 0 &&
f->mantissa == bfloat_mantissa(p, f) &&
- bkey_cmp_packed(format, p, m)) {
+ bkey_cmp_packed(b, p, m)) {
f->exponent |= BFLOAT_FAILED_PREV;
return;
}
@@ -725,7 +720,7 @@ static void make_bfloat(struct bkey_format *format,
* the comparison in bset_search_tree. If we're dropping set bits,
* increment it:
*/
- if (exponent > (int) bkey_ffs(format, m)) {
+ if (exponent > (int) bkey_ffs(b, m)) {
if (f->mantissa == BKEY_MANTISSA_MASK)
f->exponent = BFLOAT_FAILED_OVERFLOW;
@@ -811,7 +806,7 @@ retry:
for (j = inorder_next(0, t->size);
j;
j = inorder_next(j, t->size))
- make_bfloat(&b->format, t, j);
+ make_bfloat(b, t, j);
}
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
@@ -970,13 +965,13 @@ void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bset_tree *t,
if (k == t->data->start)
for (j = 1; j < t->size; j = j * 2)
- make_bfloat(&b->format, t, j);
+ make_bfloat(b, t, j);
if (bkey_next(k) == bset_bkey_last(t->data)) {
t->end = *k;
for (j = 1; j < t->size; j = j * 2 + 1)
- make_bfloat(&b->format, t, j);
+ make_bfloat(b, t, j);
}
j = inorder_to_tree(inorder, t);
@@ -985,11 +980,11 @@ void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bset_tree *t,
j < t->size &&
k == tree_to_bkey(t, j)) {
/* Fix the auxiliary search tree node this key corresponds to */
- make_bfloat(&b->format, t, j);
+ make_bfloat(b, t, j);
/* Children for which this key is the right side boundary */
for (j = j * 2; j < t->size; j = j * 2 + 1)
- make_bfloat(&b->format, t, j);
+ make_bfloat(b, t, j);
}
j = inorder_to_tree(inorder + 1, t);
@@ -997,11 +992,11 @@ void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bset_tree *t,
if (j &&
j < t->size &&
k == tree_to_prev_bkey(t, j)) {
- make_bfloat(&b->format, t, j);
+ make_bfloat(b, t, j);
/* Children for which this key is the left side boundary */
for (j = j * 2 + 1; j < t->size; j = j * 2)
- make_bfloat(&b->format, t, j);
+ make_bfloat(b, t, j);
}
}
EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
@@ -1159,7 +1154,7 @@ void bch_bset_delete(struct btree_keys *b,
/* Lookup */
__flatten
-static struct bkey_packed *bset_search_write_set(const struct bkey_format *f,
+static struct bkey_packed *bset_search_write_set(const struct btree_keys *b,
struct bset_tree *t,
struct bpos search,
const struct bkey_packed *packed_search)
@@ -1169,7 +1164,7 @@ static struct bkey_packed *bset_search_write_set(const struct bkey_format *f,
while (li + 1 != ri) {
unsigned m = (li + ri) >> 1;
- if (bkey_cmp_p_or_unp(f, table_to_bkey(t, m),
+ if (bkey_cmp_p_or_unp(b, table_to_bkey(t, m),
packed_search, search) >= 0)
ri = m;
else
@@ -1180,7 +1175,7 @@ static struct bkey_packed *bset_search_write_set(const struct bkey_format *f,
}
__flatten
-static struct bkey_packed *bset_search_tree(const struct bkey_format *format,
+static struct bkey_packed *bset_search_tree(const struct btree_keys *b,
struct bset_tree *t,
struct bpos search,
const struct bkey_packed *packed_search)
@@ -1235,7 +1230,7 @@ static struct bkey_packed *bset_search_tree(const struct bkey_format *format,
bfloat_mantissa(packed_search,
f))) >> 31);
else
- n = bkey_cmp_p_or_unp(format, tree_to_bkey(t, n),
+ n = bkey_cmp_p_or_unp(b, tree_to_bkey(t, n),
packed_search, search) >= 0
? n * 2
: n * 2 + 1;
@@ -1258,6 +1253,8 @@ static struct bkey_packed *bset_search_tree(const struct bkey_format *format,
}
}
+/* XXX: different version for lossy_packed_search = NULL */
+
/*
* Returns the first key greater than or equal to @search
*/
@@ -1269,7 +1266,6 @@ static struct bkey_packed *bch_bset_search(struct btree_keys *b,
const struct bkey_packed *lossy_packed_search,
bool strictly_greater)
{
- const struct bkey_format *f = &b->format;
struct bkey_packed *m;
/*
@@ -1292,7 +1288,7 @@ static struct bkey_packed *bch_bset_search(struct btree_keys *b,
m = t->data->start;
break;
case BSET_RW_AUX_TREE:
- m = bset_search_write_set(f, t, search, lossy_packed_search);
+ m = bset_search_write_set(b, t, search, lossy_packed_search);
break;
case BSET_RO_AUX_TREE:
/*
@@ -1302,34 +1298,34 @@ static struct bkey_packed *bch_bset_search(struct btree_keys *b,
* start and end - handle that here:
*/
- if (unlikely(bkey_cmp_p_or_unp(f, &t->end,
+ if (unlikely(bkey_cmp_p_or_unp(b, &t->end,
packed_search, search) < 0))
return bset_bkey_last(t->data);
- if (unlikely(bkey_cmp_p_or_unp(f, t->data->start,
+ if (unlikely(bkey_cmp_p_or_unp(b, t->data->start,
packed_search, search) >= 0))
m = t->data->start;
else
- m = bset_search_tree(f, t, search, lossy_packed_search);
+ m = bset_search_tree(b, t, search, lossy_packed_search);
break;
}
if (lossy_packed_search)
while (m != bset_bkey_last(t->data) &&
- !btree_iter_pos_cmp_p_or_unp(f, search, lossy_packed_search,
+ !btree_iter_pos_cmp_p_or_unp(b, search, lossy_packed_search,
m, strictly_greater))
m = bkey_next(m);
if (!packed_search)
while (m != bset_bkey_last(t->data) &&
- !btree_iter_pos_cmp_packed(f, search, m, strictly_greater))
+ !btree_iter_pos_cmp_packed(b, search, m, strictly_greater))
m = bkey_next(m);
if (IS_ENABLED(CONFIG_BCACHE_DEBUG)) {
struct bkey_packed *prev = bkey_prev_all(t, m);
BUG_ON(prev &&
- btree_iter_pos_cmp_p_or_unp(f, search, packed_search,
+ btree_iter_pos_cmp_p_or_unp(b, search, packed_search,
prev, strictly_greater));
}
@@ -1410,7 +1406,7 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter,
BUG_ON(b->nsets > MAX_BSETS);
- switch (bkey_pack_pos_lossy(&p, search, &b->format)) {
+ switch (bkey_pack_pos_lossy(&p, search, b)) {
case BKEY_PACK_POS_EXACT:
packed_search = &p;
lossy_packed_search = &p;
@@ -1609,7 +1605,7 @@ struct bkey_s_c bch_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
{
struct bkey_packed *k = bch_btree_node_iter_peek(iter, b);
- return k ? bkey_disassemble(&b->format, k, u) : bkey_s_c_null;
+ return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
}
EXPORT_SYMBOL(bch_btree_node_iter_peek_unpack);
@@ -1666,7 +1662,7 @@ int bch_bkey_print_bfloat(struct btree_keys *b, struct bkey_packed *k,
k == tree_to_bkey(t, j))
switch (bkey_float_type(&t->tree[j])) {
case BFLOAT_FAILED_UNPACKED:
- uk = bkey_unpack_key(&b->format, k);
+ uk = bkey_unpack_key(b, k);
return scnprintf(buf, size,
" failed unpacked at depth %u\n"
"\t%llu:%llu\n",
@@ -1682,10 +1678,10 @@ int bch_bkey_print_bfloat(struct btree_keys *b, struct bkey_packed *k,
le16_to_cpu(t->data->u64s) - t->end.u64s)
: tree_to_bkey(t, j >> (ffz(j) + 1));
- up = bkey_unpack_key(&b->format, p);
- uk = bkey_unpack_key(&b->format, k);
- bch_to_binary(buf1, high_word(&b->format, p), bkey_format_key_bits(&b->format));
- bch_to_binary(buf2, high_word(&b->format, k), bkey_format_key_bits(&b->format));
+ up = bkey_unpack_key(b, p);
+ uk = bkey_unpack_key(b, k);
+ bch_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits);
+ bch_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits);
return scnprintf(buf, size,
" failed prev at depth %u\n"
@@ -1695,13 +1691,13 @@ int bch_bkey_print_bfloat(struct btree_keys *b, struct bkey_packed *k,
"\t%s\n"
"\t%s\n",
ilog2(j),
- bkey_greatest_differing_bit(&b->format, l, r),
- bkey_greatest_differing_bit(&b->format, p, k),
+ bkey_greatest_differing_bit(b, l, r),
+ bkey_greatest_differing_bit(b, p, k),
uk.p.inode, uk.p.offset,
up.p.inode, up.p.offset,
buf1, buf2);
case BFLOAT_FAILED_OVERFLOW:
- uk = bkey_unpack_key(&b->format, k);
+ uk = bkey_unpack_key(b, k);
return scnprintf(buf, size,
" failed overflow at depth %u\n"
"\t%llu:%llu\n",
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 311d536e191b..c88df03c908d 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -226,6 +226,7 @@ struct btree_nr_keys {
struct btree_keys {
u8 nsets;
u8 page_order;
+ u8 nr_key_bits;
struct btree_nr_keys nr;
@@ -244,6 +245,34 @@ struct btree_keys {
#endif
};
+static inline void btree_node_set_format(struct btree_keys *b,
+ struct bkey_format f)
+{
+ b->format = f;
+ b->nr_key_bits = bkey_format_key_bits(&f);
+}
+
+/* Disassembled bkeys */
+
+static inline struct bkey_s_c bkey_disassemble(struct btree_keys *b,
+ const struct bkey_packed *k,
+ struct bkey *u)
+{
+ *u = bkey_unpack_key(b, k);
+
+ return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
+}
+
+/* non const version: */
+static inline struct bkey_s __bkey_disassemble(struct btree_keys *b,
+ struct bkey_packed *k,
+ struct bkey *u)
+{
+ *u = bkey_unpack_key(b, k);
+
+ return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
+}
+
#define for_each_bset(_b, _t) \
for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
@@ -321,6 +350,25 @@ void bch_bset_delete(struct btree_keys *, struct bkey_packed *, unsigned);
/* Bkey utility code */
+/* packed or unpacked */
+static inline int bkey_cmp_p_or_unp(const struct btree_keys *b,
+ const struct bkey_packed *l,
+ const struct bkey_packed *r_packed,
+ struct bpos r)
+{
+ const struct bkey *l_unpacked;
+
+ EBUG_ON(r_packed && !bkey_packed(r_packed));
+
+ if (unlikely(l_unpacked = packed_to_bkey_c(l)))
+ return bkey_cmp(l_unpacked->p, r);
+
+ if (likely(r_packed))
+ return __bkey_cmp_packed(l, r_packed, b);
+
+ return __bkey_cmp_left_packed(b, l, r);
+}
+
/* Returns true if @k is after iterator position @pos */
static inline bool btree_iter_pos_cmp(struct bpos pos, const struct bkey *k,
bool strictly_greater)
@@ -331,24 +379,24 @@ static inline bool btree_iter_pos_cmp(struct bpos pos, const struct bkey *k,
(cmp == 0 && !strictly_greater && !bkey_deleted(k));
}
-static inline bool btree_iter_pos_cmp_packed(const struct bkey_format *f,
+static inline bool btree_iter_pos_cmp_packed(const struct btree_keys *b,
struct bpos pos,
const struct bkey_packed *k,
bool strictly_greater)
{
- int cmp = bkey_cmp_left_packed(f, k, pos);
+ int cmp = bkey_cmp_left_packed(b, k, pos);
return cmp > 0 ||
(cmp == 0 && !strictly_greater && !bkey_deleted(k));
}
-static inline bool btree_iter_pos_cmp_p_or_unp(const struct bkey_format *f,
+static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree_keys *b,
struct bpos pos,
const struct bkey_packed *pos_packed,
const struct bkey_packed *k,
bool strictly_greater)
{
- int cmp = bkey_cmp_p_or_unp(f, k, pos_packed, pos);
+ int cmp = bkey_cmp_p_or_unp(b, k, pos_packed, pos);
return cmp > 0 ||
(cmp == 0 && !strictly_greater && !bkey_deleted(k));
@@ -465,7 +513,7 @@ static inline int __btree_node_iter_cmp(bool is_extents,
* For extents, bkey_deleted() is used as a proxy for k->size == 0, so
* deleted keys have to sort last.
*/
- return bkey_cmp_packed(&b->format, l, r) ?: is_extents
+ return bkey_cmp_packed(b, l, r) ?: is_extents
? (int) bkey_deleted(l) - (int) bkey_deleted(r)
: (int) bkey_deleted(r) - (int) bkey_deleted(l);
}
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index e0b08e137094..1def36d3cee9 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -565,7 +565,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
} else if (u64s) {
/* move part of n2 into n1 */
n1->key.k.p = n1->data->max_key =
- bkey_unpack_key(&n1->keys.format, last).p;
+ bkey_unpack_key(&n1->keys, last).p;
n2->data->min_key =
btree_type_successor(iter->btree_id,
diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c
index 108e6e76caa4..81880298f5d4 100644
--- a/drivers/md/bcache/btree_io.c
+++ b/drivers/md/bcache/btree_io.c
@@ -21,12 +21,11 @@ static void verify_no_dups(struct btree *b,
struct bkey_packed *end)
{
#ifdef CONFIG_BCACHE_DEBUG
- const struct bkey_format *f = &b->keys.format;
struct bkey_packed *k;
for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
- struct bkey l = bkey_unpack_key(f, k);
- struct bkey r = bkey_unpack_key(f, bkey_next(k));
+ struct bkey l = bkey_unpack_key(&b->keys, k);
+ struct bkey r = bkey_unpack_key(&b->keys, bkey_next(k));
BUG_ON(btree_node_is_extents(b)
? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
@@ -167,7 +166,7 @@ static inline int sort_key_whiteouts_cmp(struct btree_keys *b,
struct bkey_packed *l,
struct bkey_packed *r)
{
- return bkey_cmp_packed(&b->format, l, r);
+ return bkey_cmp_packed(b, l, r);
}
static unsigned sort_key_whiteouts(struct bkey_packed *dst,
@@ -189,8 +188,8 @@ static inline int sort_extent_whiteouts_cmp(struct btree_keys *b,
struct bkey_packed *l,
struct bkey_packed *r)
{
- struct bkey ul = bkey_unpack_key(&b->format, l);
- struct bkey ur = bkey_unpack_key(&b->format, r);
+ struct bkey ul = bkey_unpack_key(b, l);
+ struct bkey ur = bkey_unpack_key(b, r);
return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}
@@ -214,7 +213,7 @@ static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
EBUG_ON(bkeyp_val_u64s(f, in));
EBUG_ON(in->type != KEY_TYPE_DISCARD);
- r.k = bkey_unpack_key(f, in);
+ r.k = bkey_unpack_key(iter->b, in);
if (prev &&
bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
@@ -480,7 +479,7 @@ static int sort_keys_cmp(struct btree_keys *b,
struct bkey_packed *l,
struct bkey_packed *r)
{
- return bkey_cmp_packed(&b->format, l, r) ?:
+ return bkey_cmp_packed(b, l, r) ?:
(int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
(int) l->needs_whiteout - (int) r->needs_whiteout;
}
@@ -501,7 +500,7 @@ static unsigned sort_keys(struct bkey_packed *dst,
if (bkey_whiteout(in) &&
(next = sort_iter_peek(iter)) &&
- !bkey_cmp_packed(f, in, next)) {
+ !bkey_cmp_packed(iter->b, in, next)) {
BUG_ON(in->needs_whiteout &&
next->needs_whiteout);
next->needs_whiteout |= in->needs_whiteout;
@@ -524,7 +523,7 @@ static inline int sort_extents_cmp(struct btree_keys *b,
struct bkey_packed *l,
struct bkey_packed *r)
{
- return bkey_cmp_packed(&b->format, l, r) ?:
+ return bkey_cmp_packed(b, l, r) ?:
(int) bkey_deleted(l) - (int) bkey_deleted(r);
}
@@ -653,10 +652,10 @@ static void btree_node_sort(struct cache_set *c, struct btree *b,
static struct btree_nr_keys sort_repack(struct bset *dst,
struct btree_keys *src,
struct btree_node_iter *src_iter,
- struct bkey_format *in_f,
struct bkey_format *out_f,
bool filter_whiteouts)
{
+ struct bkey_format *in_f = &src->format;
struct bkey_packed *in, *out = bset_bkey_last(dst);
struct btree_nr_keys nr;
@@ -670,7 +669,7 @@ static struct btree_nr_keys sort_repack(struct bset *dst,
? in_f : &bch_bkey_format_current, in))
out->format = KEY_FORMAT_LOCAL_BTREE;
else
- bkey_unpack((void *) out, in_f, in);
+ bkey_unpack(src, (void *) out, in);
btree_keys_account_key_add(&nr, 0, out);
out = bkey_next(out);
@@ -685,7 +684,6 @@ static struct btree_nr_keys sort_repack_merge(struct cache_set *c,
struct bset *dst,
struct btree_keys *src,
struct btree_node_iter *iter,
- struct bkey_format *in_f,
struct bkey_format *out_f,
bool filter_whiteouts,
key_filter_fn filter,
@@ -705,7 +703,7 @@ static struct btree_nr_keys sort_repack_merge(struct cache_set *c,
* The filter might modify pointers, so we have to unpack the
* key and values to &tmp.k:
*/
- bkey_unpack(&tmp.k, in_f, k);
+ bkey_unpack(src, &tmp.k, k);
if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
continue;
@@ -764,7 +762,6 @@ void bch_btree_sort_into(struct cache_set *c,
btree_node_ops(src)->key_merge)
nr = sort_repack_merge(c, dst->keys.set->data,
&src->keys, &src_iter,
- &src->keys.format,
&dst->keys.format,
true,
btree_node_ops(src)->key_normalize,
@@ -772,7 +769,6 @@ void bch_btree_sort_into(struct cache_set *c,
else
nr = sort_repack(dst->keys.set->data,
&src->keys, &src_iter,
- &src->keys.format,
&dst->keys.format,
true);
@@ -889,7 +885,6 @@ static const char *validate_bset(struct cache_set *c, struct btree *b,
struct bset *i, unsigned sectors,
unsigned *whiteout_u64s)
{
- struct bkey_format *f = &b->keys.format;
struct bkey_packed *k, *prev = NULL;
bool seen_non_whiteout = false;
@@ -943,7 +938,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b,
if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
bch_bkey_swab(btree_node_type(b), &b->keys.format, k);
- u = bkey_disassemble(f, k, &tmp);
+ u = bkey_disassemble(&b->keys, k, &tmp);
invalid = btree_bkey_invalid(c, b, u);
if (invalid) {
@@ -968,7 +963,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b,
if (!seen_non_whiteout &&
(!bkey_whiteout(k) ||
- (prev && bkey_cmp_left_packed(f, prev,
+ (prev && bkey_cmp_left_packed(&b->keys, prev,
bkey_start_pos(u.k)) > 0))) {
*whiteout_u64s = k->_data - i->_data;
seen_non_whiteout = true;
@@ -1050,8 +1045,9 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
if (err)
goto err;
- b->keys.format = b->data->format;
b->keys.set->data = &b->data->keys;
+
+ btree_node_set_format(&b->keys, b->data->format);
} else {
bne = write_block(b);
i = &bne->keys;
@@ -1127,8 +1123,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
err = "short btree key";
if (b->keys.set[0].size &&
- bkey_cmp_packed(&b->keys.format, &b->key.k,
- &b->keys.set[0].end) < 0)
+ bkey_cmp_packed(&b->keys, &b->key.k, &b->keys.set[0].end) < 0)
goto err;
out:
diff --git a/drivers/md/bcache/btree_iter.c b/drivers/md/bcache/btree_iter.c
index cbfe35ce0ec4..e7b97fe6b6e6 100644
--- a/drivers/md/bcache/btree_iter.c
+++ b/drivers/md/bcache/btree_iter.c
@@ -287,7 +287,6 @@ int bch_btree_iter_unlock(struct btree_iter *iter)
static void __bch_btree_iter_verify(struct btree_iter *iter,
struct btree *b)
{
- const struct bkey_format *f = &b->keys.format;
struct btree_node_iter *node_iter = &iter->node_iters[b->level];
struct btree_node_iter tmp = *node_iter;
struct bkey_packed *k;
@@ -301,10 +300,10 @@ static void __bch_btree_iter_verify(struct btree_iter *iter,
k = b->level
? bch_btree_node_iter_prev(&tmp, &b->keys)
: bch_btree_node_iter_prev_all(&tmp, &b->keys);
- if (k && btree_iter_pos_cmp_packed(f, iter->pos, k,
+ if (k && btree_iter_pos_cmp_packed(&b->keys, iter->pos, k,
iter->is_extents)) {
char buf[100];
- struct bkey uk = bkey_unpack_key(&b->keys.format, k);
+ struct bkey uk = bkey_unpack_key(&b->keys, k);
bch_bkey_to_text(buf, sizeof(buf), &uk);
panic("prev key should be before after pos:\n%s\n%llu:%llu\n",
@@ -312,10 +311,10 @@ static void __bch_btree_iter_verify(struct btree_iter *iter,
}
k = bch_btree_node_iter_peek_all(node_iter, &b->keys);
- if (k && !btree_iter_pos_cmp_packed(f, iter->pos, k,
+ if (k && !btree_iter_pos_cmp_packed(&b->keys, iter->pos, k,
iter->is_extents)) {
char buf[100];
- struct bkey uk = bkey_unpack_key(&b->keys.format, k);
+ struct bkey uk = bkey_unpack_key(&b->keys, k);
bch_bkey_to_text(buf, sizeof(buf), &uk);
panic("next key should be before iter pos:\n%llu:%llu\n%s\n",
@@ -344,7 +343,6 @@ static void __bch_btree_node_iter_fix(struct btree_iter *iter,
unsigned clobber_u64s,
unsigned new_u64s)
{
- struct bkey_format *f = &b->keys.format;
const struct bkey_packed *end = bset_bkey_last(t->data);
struct btree_node_iter_set *set;
unsigned offset = __btree_node_key_to_offset(&b->keys, where);
@@ -357,7 +355,7 @@ static void __bch_btree_node_iter_fix(struct btree_iter *iter,
/* didn't find the bset in the iterator - might have to readd it: */
if (new_u64s &&
- btree_iter_pos_cmp_packed(f, iter->pos, where,
+ btree_iter_pos_cmp_packed(&b->keys, iter->pos, where,
iter->is_extents))
bch_btree_node_iter_push(node_iter, &b->keys, where, end);
return;
@@ -369,7 +367,7 @@ found:
return;
if (new_u64s &&
- btree_iter_pos_cmp_packed(f, iter->pos, where,
+ btree_iter_pos_cmp_packed(&b->keys, iter->pos, where,
iter->is_extents)) {
set->k = offset;
bch_btree_node_iter_sort(node_iter, &b->keys);
@@ -405,7 +403,7 @@ found:
* to.
*/
if (b->level && new_u64s && !bkey_deleted(where) &&
- btree_iter_pos_cmp_packed(f, iter->pos, where,
+ btree_iter_pos_cmp_packed(&b->keys, iter->pos, where,
iter->is_extents)) {
struct bset_tree *t;
struct bkey_packed *k;
@@ -472,10 +470,9 @@ void bch_btree_node_iter_fix(struct btree_iter *iter,
/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
{
- const struct bkey_format *f = &iter->nodes[iter->level]->keys.format;
+ struct btree_keys *b = &iter->nodes[iter->level]->keys;
struct bkey_packed *k =
- bch_btree_node_iter_peek_all(&iter->node_iters[iter->level],
- &iter->nodes[iter->level]->keys);
+ bch_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
struct bkey_s_c ret;
EBUG_ON(!btree_node_locked(iter, iter->level));
@@ -483,7 +480,7 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
if (!k)
return bkey_s_c_null;
- ret = bkey_disassemble(f, k, &iter->k);
+ ret = bkey_disassemble(b, k, &iter->k);
if (debug_check_bkeys(iter->c))
bkey_debugcheck(iter->c, iter->nodes[iter->level], ret);
@@ -493,10 +490,9 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
{
- const struct bkey_format *f = &iter->nodes[iter->level]->keys.format;
+ struct btree_keys *b = &iter->nodes[iter->level]->keys;
struct bkey_packed *k =
- bch_btree_node_iter_peek(&iter->node_iters[iter->level],
- &iter->nodes[iter->level]->keys);
+ bch_btree_node_iter_peek(&iter->node_iters[iter->level], b);
struct bkey_s_c ret;
EBUG_ON(!btree_node_locked(iter, iter->level));
@@ -504,7 +500,7 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
if (!k)
return bkey_s_c_null;
- ret = bkey_disassemble(f, k, &iter->k);
+ ret = bkey_disassemble(b, k, &iter->k);
if (debug_check_bkeys(iter->c))
bkey_debugcheck(iter->c, iter->nodes[iter->level], ret);
@@ -539,10 +535,10 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
&iter->nodes[b->level + 1]->keys);
if (!k ||
bkey_deleted(k) ||
- bkey_cmp_left_packed(&iter->nodes[b->level + 1]->keys.format,
+ bkey_cmp_left_packed(&iter->nodes[b->level + 1]->keys,
k, b->key.k.p)) {
char buf[100];
- struct bkey uk = bkey_unpack_key(&b->keys.format, k);
+ struct bkey uk = bkey_unpack_key(&b->keys, k);
bch_bkey_to_text(buf, sizeof(buf), &uk);
panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
@@ -978,7 +974,7 @@ void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_p
EBUG_ON(bkey_cmp(new_pos, iter->nodes[0]->key.k.p) > 0);
while ((k = bch_btree_node_iter_peek_all(node_iter, b)) &&
- !btree_iter_pos_cmp_packed(&b->format, new_pos, k,
+ !btree_iter_pos_cmp_packed(b, new_pos, k,
iter->is_extents))
bch_btree_node_iter_advance(node_iter, b);
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index ea4c47438afd..6097a421c35d 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -35,7 +35,7 @@ void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b)
k != bset_bkey_last(t->data);
k = bkey_next(k))
if (!bkey_whiteout(k)) {
- uk = bkey_unpack_key(&b->keys.format, k);
+ uk = bkey_unpack_key(&b->keys, k);
bch_bkey_format_add_key(s, &uk);
}
}
@@ -319,7 +319,8 @@ struct btree *__btree_node_alloc_replacement(struct cache_set *c,
n->data->min_key = b->data->min_key;
n->data->max_key = b->data->max_key;
n->data->format = format;
- n->keys.format = format;
+
+ btree_node_set_format(&n->keys, format);
bch_btree_sort_into(c, n, b);
@@ -467,6 +468,8 @@ static struct btree *__btree_root_alloc(struct cache_set *c, unsigned level,
b->data->format = bch_btree_calc_format(b);
b->key.k.p = POS_MAX;
+ btree_node_set_format(&b->keys, b->data->format);
+
six_unlock_write(&b->lock);
return b;
@@ -623,7 +626,6 @@ static void bch_insert_fixup_btree_ptr(struct btree_iter *iter,
struct disk_reservation *disk_res)
{
struct cache_set *c = iter->c;
- const struct bkey_format *f = &b->keys.format;
struct bucket_stats_cache_set stats = { 0 };
struct bkey_packed *k;
struct bkey tmp;
@@ -634,16 +636,16 @@ static void bch_insert_fixup_btree_ptr(struct btree_iter *iter,
gc_pos_btree_node(b), &stats);
while ((k = bch_btree_node_iter_peek_all(node_iter, &b->keys)) &&
- !btree_iter_pos_cmp_packed(f, insert->k.p, k, false))
+ !btree_iter_pos_cmp_packed(&b->keys, insert->k.p, k, false))
bch_btree_node_iter_advance(node_iter, &b->keys);
/*
* If we're overwriting, look up pending delete and mark so that gc
* marks it on the pending delete list:
*/
- if (k && !bkey_cmp_packed(f, k, &insert->k))
+ if (k && !bkey_cmp_packed(&b->keys, k, &insert->k))
bch_btree_node_free_index(c, b, iter->btree_id,
- bkey_disassemble(f, k, &tmp),
+ bkey_disassemble(&b->keys, k, &tmp),
&stats);
bch_cache_set_stats_apply(c, &stats, disk_res, gc_pos_btree_node(b));
@@ -673,7 +675,7 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter,
BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b));
k = bch_btree_node_iter_peek_all(node_iter, &b->keys);
- if (k && !bkey_cmp_packed(f, k, &insert->k)) {
+ if (k && !bkey_cmp_packed(&b->keys, k, &insert->k)) {
BUG_ON(bkey_whiteout(k));
t = bch_bkey_to_bset(&b->keys, k);
@@ -1143,7 +1145,6 @@ void bch_btree_interior_update_will_free_node(struct cache_set *c,
static void btree_node_interior_verify(struct btree *b)
{
- const struct bkey_format *f = &b->keys.format;
struct btree_node_iter iter;
struct bkey_packed *k;
@@ -1152,7 +1153,7 @@ static void btree_node_interior_verify(struct btree *b)
bch_btree_node_iter_init(&iter, &b->keys, b->key.k.p, false, false);
#if 1
BUG_ON(!(k = bch_btree_node_iter_peek(&iter, &b->keys)) ||
- bkey_cmp_left_packed(f, k, b->key.k.p));
+ bkey_cmp_left_packed(&b->keys, k, b->key.k.p));
BUG_ON((bch_btree_node_iter_advance(&iter, &b->keys),
!bch_btree_node_iter_end(&iter)));
@@ -1165,7 +1166,7 @@ static void btree_node_interior_verify(struct btree *b)
goto err;
msg = "isn't what it should be";
- if (bkey_cmp_left_packed(f, k, b->key.k.p))
+ if (bkey_cmp_left_packed(&b->keys, k, b->key.k.p))
goto err;
bch_btree_node_iter_advance(&iter, &b->keys);
@@ -1192,7 +1193,6 @@ bch_btree_insert_keys_interior(struct btree *b,
struct cache_set *c = iter->c;
struct btree_iter *linked;
struct btree_node_iter node_iter;
- const struct bkey_format *f = &b->keys.format;
struct bkey_i *insert = bch_keylist_front(insert_keys);
struct bkey_packed *k;
@@ -1218,7 +1218,7 @@ bch_btree_insert_keys_interior(struct btree *b,
* the node the iterator points to:
*/
while ((k = bch_btree_node_iter_prev_all(&node_iter, &b->keys)) &&
- (bkey_cmp_packed(f, k, &insert->k) >= 0))
+ (bkey_cmp_packed(&b->keys, k, &insert->k) >= 0))
;
while (!bch_keylist_empty(insert_keys)) {
@@ -1261,10 +1261,11 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n
n2 = bch_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve);
n2->data->max_key = n1->data->max_key;
- n2->keys.format = n1->keys.format;
n2->data->format = n1->keys.format;
n2->key.k.p = n1->key.k.p;
+ btree_node_set_format(&n2->keys, n2->data->format);
+
set1 = btree_bset_first(n1);
set2 = btree_bset_first(n2);
@@ -1290,7 +1291,7 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n
BUG_ON(!prev);
- n1->key.k.p = bkey_unpack_key(&n1->keys.format, prev).p;
+ n1->key.k.p = bkey_unpack_key(&n1->keys, prev).p;
n1->data->max_key = n1->key.k.p;
n2->data->min_key =
btree_type_successor(n1->btree_id, n1->key.k.p);
@@ -1604,7 +1605,7 @@ static struct btree *btree_node_get_sibling(struct btree_iter *iter,
node_iter = iter->node_iters[parent->level];
k = bch_btree_node_iter_peek_all(&node_iter, &parent->keys);
- BUG_ON(bkey_cmp_left_packed(&parent->keys.format, k, b->key.k.p));
+ BUG_ON(bkey_cmp_left_packed(&parent->keys, k, b->key.k.p));
do {
k = sib == btree_prev_sib
@@ -1615,7 +1616,7 @@ static struct btree *btree_node_get_sibling(struct btree_iter *iter,
return NULL;
} while (bkey_deleted(k));
- bkey_unpack(&tmp.k, &parent->keys.format, k);
+ bkey_unpack(&parent->keys, &tmp.k, k);
ret = bch_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
@@ -1737,9 +1738,10 @@ retry:
n->data->min_key = prev->data->min_key;
n->data->max_key = next->data->max_key;
n->data->format = new_f;
- n->keys.format = new_f;
n->key.k.p = next->key.k.p;
+ btree_node_set_format(&n->keys, new_f);
+
bch_btree_sort_into(c, n, prev);
bch_btree_sort_into(c, n, next);
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 1758441c0388..b3aabf753941 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -44,7 +44,7 @@ static void sort_key_next(struct btree_node_iter *iter,
*/
#define key_sort_cmp(l, r) \
({ \
- int _c = bkey_cmp_packed(&b->format, \
+ int _c = bkey_cmp_packed(b, \
__btree_node_offset_to_key(b, (l).k), \
__btree_node_offset_to_key(b, (r).k)); \
\
@@ -54,7 +54,6 @@ static void sort_key_next(struct btree_node_iter *iter,
static inline bool should_drop_next_key(struct btree_node_iter *iter,
struct btree_keys *b)
{
- const struct bkey_format *f = &b->format;
struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);
@@ -73,7 +72,7 @@ static inline bool should_drop_next_key(struct btree_node_iter *iter,
* comes first; so if l->k compares equal to r->k then l->k is older and
* should be dropped.
*/
- return !bkey_cmp_packed(f,
+ return !bkey_cmp_packed(b,
__btree_node_offset_to_key(b, l->k),
__btree_node_offset_to_key(b, r->k));
}
@@ -707,10 +706,9 @@ static void extent_save(struct btree_keys *b, struct btree_node_iter *iter,
*/
#define extent_sort_cmp(l, r) \
({ \
- const struct bkey_format *_f = &b->format; \
- struct bkey _ul = bkey_unpack_key(_f, \
+ struct bkey _ul = bkey_unpack_key(b, \
__btree_node_offset_to_key(b, (l).k)); \
- struct bkey _ur = bkey_unpack_key(_f, \
+ struct bkey _ur = bkey_unpack_key(b, \
__btree_node_offset_to_key(b, (r).k)); \
\
int _c = bkey_cmp(bkey_start_pos(&_ul), bkey_start_pos(&_ur)); \
@@ -744,7 +742,7 @@ static void extent_sort_append(struct cache_set *c,
if (bkey_whiteout(k))
return;
- bkey_unpack(&tmp.k, f, k);
+ bkey_unpack(b, &tmp.k, k);
if (*prev &&
bch_extent_merge(c, b, (void *) *prev, &tmp.k))
@@ -794,8 +792,8 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct cache_set *c,
rk = __btree_node_offset_to_key(b, _r->k);
- l = __bkey_disassemble(f, lk, &l_unpacked);
- r = __bkey_disassemble(f, rk, &r_unpacked);
+ l = __bkey_disassemble(b, lk, &l_unpacked);
+ r = __bkey_disassemble(b, rk, &r_unpacked);
/* If current key and next key don't overlap, just append */
if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
@@ -1405,7 +1403,6 @@ bch_delete_fixup_extent(struct extent_insert_state *s)
struct btree_iter *iter = s->insert->iter;
struct btree *b = iter->nodes[0];
struct btree_node_iter *node_iter = &iter->node_iters[0];
- const struct bkey_format *f = &b->keys.format;
struct bkey_packed *_k;
struct bkey unpacked;
struct bkey_i *insert = s->insert->k;
@@ -1420,7 +1417,7 @@ bch_delete_fixup_extent(struct extent_insert_state *s)
(ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
(_k = bch_btree_node_iter_peek_all(node_iter, &b->keys))) {
struct bset_tree *t = bch_bkey_to_bset(&b->keys, _k);
- struct bkey_s k = __bkey_disassemble(f, _k, &unpacked);
+ struct bkey_s k = __bkey_disassemble(&b->keys, _k, &unpacked);
enum bch_extent_overlap overlap;
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
@@ -1560,7 +1557,6 @@ bch_insert_fixup_extent(struct btree_insert *trans,
struct btree_iter *iter = insert->iter;
struct btree *b = iter->nodes[0];
struct btree_node_iter *node_iter = &iter->node_iters[0];
- const struct bkey_format *f = &b->keys.format;
struct bkey_packed *_k;
struct bkey unpacked;
enum btree_insert_ret ret = BTREE_INSERT_OK;
@@ -1595,7 +1591,7 @@ bch_insert_fixup_extent(struct btree_insert *trans,
(ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK &&
(_k = bch_btree_node_iter_peek_all(node_iter, &b->keys))) {
struct bset_tree *t = bch_bkey_to_bset(&b->keys, _k);
- struct bkey_s k = __bkey_disassemble(f, _k, &unpacked);
+ struct bkey_s k = __bkey_disassemble(&b->keys, _k, &unpacked);
enum bch_extent_overlap overlap;
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
@@ -2353,12 +2349,11 @@ static bool extent_merge_do_overlapping(struct btree_iter *iter,
{
struct btree_keys *b = &iter->nodes[0]->keys;
struct btree_node_iter *node_iter = &iter->node_iters[0];
- const struct bkey_format *f = &b->format;
struct bset_tree *t;
struct bkey_packed *k;
struct bkey uk;
struct bpos new_pos = back_merge ? m->p : bkey_start_pos(m);
- bool could_pack = bkey_pack_pos((void *) &uk, new_pos, f);
+ bool could_pack = bkey_pack_pos((void *) &uk, new_pos, b);
bool check = true;
/*
@@ -2395,7 +2390,7 @@ do_fixup:
*/
for (;
k &&
- (uk = bkey_unpack_key(f, k),
+ (uk = bkey_unpack_key(b, k),
bkey_cmp(uk.p, bkey_start_pos(m)) > 0);
k = bkey_prev_all(t, k)) {
if (bkey_cmp(uk.p, m->p) >= 0)
@@ -2409,7 +2404,7 @@ do_fixup:
/* Front merge - walk forwards */
for (;
k != bset_bkey_last(t->data) &&
- (uk = bkey_unpack_key(f, k),
+ (uk = bkey_unpack_key(b, k),
bkey_cmp(uk.p, m->p) < 0);
k = bkey_next(k)) {
if (bkey_cmp(uk.p,
@@ -2459,8 +2454,8 @@ static bool bch_extent_merge_inline(struct cache_set *c,
* We need to save copies of both l and r, because we might get a
* partial merge (which modifies both) and then fails to repack
*/
- bkey_unpack(&li.k, f, l);
- bkey_unpack(&ri.k, f, r);
+ bkey_unpack(b, &li.k, l);
+ bkey_unpack(b, &ri.k, r);
m = back_merge ? l : r;
mi = back_merge ? &li.k : &ri.k;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 015227e243f0..129ecb638ae5 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -635,4 +635,82 @@ size_t bch_rand_range(size_t);
void memcpy_to_bio(struct bio *, struct bvec_iter, void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);
+static inline void __memcpy_u64s(void *dst, const void *src,
+ unsigned u64s)
+{
+#ifdef CONFIG_X86_64
+ long d0, d1, d2;
+ asm volatile("rep ; movsq"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (u64s), "1" (dst), "2" (src)
+ : "memory");
+#else
+ u64 *d = dst;
+ const u64 *s = src;
+
+ while (u64s--)
+ *d++ = *s++;
+#endif
+}
+
+static inline void memcpy_u64s(void *dst, const void *src,
+ unsigned u64s)
+{
+ EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
+ dst + u64s * sizeof(u64) <= src));
+
+ __memcpy_u64s(dst, src, u64s);
+}
+
+static inline void __memmove_u64s_down(void *dst, const void *src,
+ unsigned u64s)
+{
+ __memcpy_u64s(dst, src, u64s);
+}
+
+static inline void memmove_u64s_down(void *dst, const void *src,
+ unsigned u64s)
+{
+ EBUG_ON(dst > src);
+
+ __memmove_u64s_down(dst, src, u64s);
+}
+
+static inline void __memmove_u64s_up(void *_dst, const void *_src,
+ unsigned u64s)
+{
+ u64 *dst = (u64 *) _dst + u64s - 1;
+ u64 *src = (u64 *) _src + u64s - 1;
+
+#ifdef CONFIG_X86_64
+ long d0, d1, d2;
+ asm volatile("std ;\n"
+ "rep ; movsq\n"
+ "cld ;\n"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (u64s), "1" (dst), "2" (src)
+ : "memory");
+#else
+ while (u64s--)
+ *dst-- = *src--;
+#endif
+}
+
+static inline void memmove_u64s_up(void *dst, const void *src,
+ unsigned u64s)
+{
+ EBUG_ON(dst < src);
+
+ __memmove_u64s_up(dst, src, u64s);
+}
+
+static inline void memmove_u64s(void *dst, const void *src,
+ unsigned u64s)
+{
+ if (dst < src)
+ __memmove_u64s_down(dst, src, u64s);
+ else
+ __memmove_u64s_up(dst, src, u64s);
+}
+
#endif /* _BCACHE_UTIL_H */