author:    Kent Overstreet <kent.overstreet@linux.dev>  2022-11-24 03:12:22 -0500
committer: Kent Overstreet <kent.overstreet@linux.dev>  2023-01-06 19:47:59 -0500
commit:    f1da250437c24e8fc7c95281271e40aacd1361ff
tree:      1deba2b884cd0b1965f08e9e0c8941ab360dfbf5
parent:    4b408a5f167fcc8d80e3db171bdbc6241cd10a29
bcachefs: New bpos_cmp(), bkey_cmp() replacements
This patch introduces:
- bpos_eq()
- bpos_lt()
- bpos_le()
- bpos_gt()
- bpos_ge()
and equivalent replacements for bkey_cmp().
Looking at the generated assembly, these helpers could probably be
improved further, but we already see a significant code size
improvement with this patch.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
32 files changed, 245 insertions, 190 deletions
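
[Editor's note: a minimal standalone sketch of the change, for readers
skimming the diff below. The bodies of bpos_cmp(), bpos_eq() and
bpos_lt() are copied from the bkey.h hunk in this patch; struct bpos
and cmp_int() here are simplified stand-ins for the kernel
definitions, not the real ones.]

  #include <stdbool.h>
  #include <stdint.h>

  /* Simplified stand-in for bcachefs's struct bpos; the real,
   * packed/endian-annotated definition lives elsewhere in the tree. */
  struct bpos {
  	uint64_t inode;
  	uint64_t offset;
  	uint32_t snapshot;
  };

  /* Stand-in for the kernel's cmp_int(): returns -1, 0 or +1. */
  #define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))

  /* Old interface: one three-way comparison serves every predicate.
   * (?: with an omitted middle operand is the GNU extension the
   * kernel source itself uses.) */
  static inline int bpos_cmp(struct bpos l, struct bpos r)
  {
  	return cmp_int(l.inode,    r.inode) ?:
  	       cmp_int(l.offset,   r.offset) ?:
  	       cmp_int(l.snapshot, r.snapshot);
  }

  /* New interface: each predicate computes only what it needs;
   * equality reduces to a branch-free XOR/OR chain. */
  static inline bool bpos_eq(struct bpos l, struct bpos r)
  {
  	return !((l.inode    ^ r.inode) |
  		 (l.offset   ^ r.offset) |
  		 (l.snapshot ^ r.snapshot));
  }

  static inline bool bpos_lt(struct bpos l, struct bpos r)
  {
  	return l.inode    != r.inode    ? l.inode    < r.inode :
  	       l.offset   != r.offset   ? l.offset   < r.offset :
  	       l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
  }

Call sites then translate mechanically -- !bpos_cmp(a, b) becomes
bpos_eq(a, b), bpos_cmp(a, b) < 0 becomes bpos_lt(a, b), and so on --
which is exactly the transformation applied throughout the diff below.
The bkey_*() variants introduced alongside are identical except that
they ignore the snapshot field.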
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 742391e9ba17..1acdee8226eb 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -1102,7 +1102,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 		goto out;
 	}
 
-	if (bkey_cmp(*discard_pos_done, iter.pos) &&
+	if (!bkey_eq(*discard_pos_done, iter.pos) &&
 	    ca->mi.discard && !c->opts.nochanges) {
 		/*
 		 * This works without any other locks because this is the only
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 8e9a17decdd5..1f0665f841b3 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -419,7 +419,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 			   BTREE_ITER_SLOTS, k, ret) {
 		struct bch_alloc_v4 a;
 
-		if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
 			break;
 
 		if (ca->new_fs_bucket_idx &&
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 466a63c76827..c5afb154b14b 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -36,7 +36,7 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
 		(bucket_to_sector(ca, bucket.offset) <<
 		 MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
 
-	BUG_ON(bkey_cmp(bucket, bp_pos_to_bucket(c, ret)));
+	BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
 	return ret;
 }
@@ -60,7 +60,7 @@ static bool extent_matches_bp(struct bch_fs *c,
 		bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
 				      &bucket2, &bp2);
 
-		if (!bpos_cmp(bucket, bucket2) &&
+		if (bpos_eq(bucket, bucket2) &&
 		    !memcmp(&bp, &bp2, sizeof(bp)))
 			return true;
 	}
@@ -79,7 +79,7 @@ int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
 		return -BCH_ERR_invalid_bkey;
 	}
 
-	if (bpos_cmp(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
+	if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
 		prt_str(err, "backpointer at wrong pos");
 		return -BCH_ERR_invalid_bkey;
 	}
@@ -434,7 +434,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,
 	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
 				     bp_pos, 0, k, ret) {
-		if (bpos_cmp(k.k->p, bp_end_pos) >= 0)
+		if (bpos_ge(k.k->p, bp_end_pos))
 			break;
 
 		if (k.k->type != KEY_TYPE_backpointer)
@@ -646,8 +646,8 @@ static int check_bp_exists(struct btree_trans *trans,
 	struct bkey_s_c alloc_k, bp_k;
 	int ret;
 
-	if (bpos_cmp(bucket_pos, bucket_start) < 0 ||
-	    bpos_cmp(bucket_pos, bucket_end) > 0)
+	if (bpos_lt(bucket_pos, bucket_start) ||
+	    bpos_gt(bucket_pos, bucket_end))
 		return 0;
 
 	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
@@ -934,8 +934,8 @@ int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
 			break;
 		}
 
-		if (bpos_cmp(alloc_iter.pos, SPOS_MAX) &&
-		    bpos_cmp(bucket_pos_to_bp(trans->c, alloc_iter.pos, 0), bp_iter.pos) < 0) {
+		if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
+		    bpos_lt(bucket_pos_to_bp(trans->c, alloc_iter.pos, 0), bp_iter.pos)) {
 			if (!bch2_btree_iter_advance(&alloc_iter))
 				alloc_end = true;
 		} else {
@@ -960,11 +960,11 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 		if (ret)
 			break;
 
-		if (!bpos_cmp(start, POS_MIN) && bpos_cmp(end, SPOS_MAX))
+		if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
 			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
 				    __func__, btree_nodes_fit_in_ram(c));
 
-		if (bpos_cmp(start, POS_MIN) || bpos_cmp(end, SPOS_MAX)) {
+		if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
 			struct printbuf buf = PRINTBUF;
 
 			prt_str(&buf, "check_extents_to_backpointers(): ");
@@ -977,7 +977,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 		}
 
 		ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
-		if (ret || !bpos_cmp(end, SPOS_MAX))
+		if (ret || bpos_eq(end, SPOS_MAX))
 			break;
 
 		start = bpos_successor(end);
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index 788c42462dd6..14600ca41eb6 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -136,6 +136,37 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b,
 	return bkey_cmp_left_packed(b, l, &r);
 }
 
+static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
+{
+	return !((l.inode ^ r.inode) |
+		 (l.offset ^ r.offset) |
+		 (l.snapshot ^ r.snapshot));
+}
+
+static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
+{
+	return l.inode != r.inode ? l.inode < r.inode :
+	       l.offset != r.offset ? l.offset < r.offset :
+	       l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
+}
+
+static __always_inline bool bpos_le(struct bpos l, struct bpos r)
+{
+	return l.inode != r.inode ? l.inode < r.inode :
+	       l.offset != r.offset ? l.offset < r.offset :
+	       l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
+}
+
+static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
+{
+	return bpos_lt(r, l);
+}
+
+static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
+{
+	return bpos_le(r, l);
+}
+
 static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
 {
 	return cmp_int(l.inode,    r.inode) ?:
@@ -143,6 +174,36 @@ static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
 		cmp_int(l.snapshot, r.snapshot);
 }
 
+static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
+{
+	return !((l.inode ^ r.inode) |
+		 (l.offset ^ r.offset));
+}
+
+static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
+{
+	return l.inode != r.inode
+		? l.inode < r.inode
+		: l.offset < r.offset;
+}
+
+static __always_inline bool bkey_le(struct bpos l, struct bpos r)
+{
+	return l.inode != r.inode
+		? l.inode < r.inode
+		: l.offset <= r.offset;
+}
+
+static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
+{
+	return bkey_lt(r, l);
+}
+
+static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
+{
+	return bkey_le(r, l);
+}
+
 static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
 {
 	return cmp_int(l.inode,    r.inode) ?:
@@ -151,12 +212,12 @@ static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
 
 static inline struct bpos bpos_min(struct bpos l, struct bpos r)
 {
-	return bpos_cmp(l, r) < 0 ? l : r;
+	return bpos_lt(l, r) ? l : r;
 }
 
 static inline struct bpos bpos_max(struct bpos l, struct bpos r)
 {
-	return bpos_cmp(l, r) > 0 ? l : r;
+	return bpos_gt(l, r) ? l : r;
 }
 
 void bch2_bpos_swab(struct bpos *);
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 31cfc65e75c7..c7c0a9781a35 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -250,7 +250,7 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
 	}
 
 	if (type != BKEY_TYPE_btree &&
-	    !bkey_cmp(k.k->p, POS_MAX)) {
+	    bkey_eq(k.k->p, POS_MAX)) {
 		prt_printf(err, "key at POS_MAX");
 		return -BCH_ERR_invalid_bkey;
 	}
@@ -269,12 +269,12 @@ int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
 int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
 			    struct printbuf *err)
 {
-	if (bpos_cmp(k.k->p, b->data->min_key) < 0) {
+	if (bpos_lt(k.k->p, b->data->min_key)) {
 		prt_printf(err, "key before start of btree node");
 		return -BCH_ERR_invalid_bkey;
 	}
 
-	if (bpos_cmp(k.k->p, b->data->max_key) > 0) {
+	if (bpos_gt(k.k->p, b->data->max_key)) {
 		prt_printf(err, "key past end of btree node");
 		return -BCH_ERR_invalid_bkey;
 	}
@@ -284,11 +284,11 @@ int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
 
 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
 {
-	if (!bpos_cmp(pos, POS_MIN))
+	if (bpos_eq(pos, POS_MIN))
 		prt_printf(out, "POS_MIN");
-	else if (!bpos_cmp(pos, POS_MAX))
+	else if (bpos_eq(pos, POS_MAX))
 		prt_printf(out, "POS_MAX");
-	else if (!bpos_cmp(pos, SPOS_MAX))
+	else if (bpos_eq(pos, SPOS_MAX))
 		prt_printf(out, "SPOS_MAX");
 	else {
 		if (pos.inode == U64_MAX)
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 81e9a8a7439b..6b0d81325c92 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -60,7 +60,7 @@ static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct b
 {
 	return l->type == r->type &&
 		!bversion_cmp(l->version, r->version) &&
-		!bpos_cmp(l->p, bkey_start_pos(r));
+		bpos_eq(l->p, bkey_start_pos(r));
 }
 
 bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index f22609bad54e..89478fc57411 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -86,13 +86,12 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 
 		n = bkey_unpack_key(b, _n);
 
-		if (bpos_cmp(n.p, k.k->p) < 0) {
+		if (bpos_lt(n.p, k.k->p)) {
 			printk(KERN_ERR "Key skipped backwards\n");
 			continue;
 		}
 
-		if (!bkey_deleted(k.k) &&
-		    !bpos_cmp(n.p, k.k->p))
+		if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
 			printk(KERN_ERR "Duplicate keys\n");
 	}
 
@@ -533,7 +532,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
 		goto start;
 	while (1) {
 		if (rw_aux_to_bkey(b, t, j) == k) {
-			BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k,
+			BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
 					bkey_unpack_pos(b, k)));
start:
			if (++j == t->size)
@@ -1068,7 +1067,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,
 	while (l + 1 != r) {
 		unsigned m = (l + r) >> 1;
 
-		if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
+		if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
 			l = m;
 		else
 			r = m;
@@ -1321,8 +1320,8 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
 	struct bkey_packed *k[MAX_BSETS];
 	unsigned i;
 
-	EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0);
-	EBUG_ON(bpos_cmp(*search, b->data->max_key) > 0);
+	EBUG_ON(bpos_lt(*search, b->data->min_key));
+	EBUG_ON(bpos_gt(*search, b->data->max_key));
 
 	bset_aux_tree_verify(b);
 
 	memset(iter, 0, sizeof(*iter));
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 51288d76ee24..2b48db53f9d0 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -833,9 +833,9 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b)
 {
 	if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
 	    b->c.level != BTREE_NODE_LEVEL(b->data) ||
-	    bpos_cmp(b->data->max_key, b->key.k.p) ||
+	    !bpos_eq(b->data->max_key, b->key.k.p) ||
 	    (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
-	     bpos_cmp(b->data->min_key,
+	     !bpos_eq(b->data->min_key,
 		      bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
 		btree_bad_header(c, b);
 }
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 20e804ecb104..529db171b834 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -76,7 +76,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
 	if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
 		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
 
-		if (bpos_cmp(expected_start, bp->v.min_key)) {
+		if (!bpos_eq(expected_start, bp->v.min_key)) {
 			bch2_topology_error(c);
 
 			if (bkey_deleted(&prev->k->k)) {
@@ -106,7 +106,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
 		}
 	}
 
-	if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
+	if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
 		bch2_topology_error(c);
 
 		printbuf_reset(&buf1);
@@ -274,12 +274,12 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 	bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
 
 	if (prev &&
-	    bpos_cmp(expected_start, cur->data->min_key) > 0 &&
+	    bpos_gt(expected_start, cur->data->min_key) &&
 	    BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
 		/* cur overwrites prev: */
 
-		if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key,
-						 cur->data->min_key) >= 0, c,
+		if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
+						cur->data->min_key), c,
				"btree node overwritten by next node at btree %s level %u:\n"
				"  node %s\n"
				"  next %s",
@@ -289,7 +289,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 			goto out;
 		}
 
-		if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
+		if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
					bpos_predecessor(cur->data->min_key)), c,
				"btree node with incorrect max_key at btree %s level %u:\n"
				"  node %s\n"
@@ -301,8 +301,8 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 	} else {
 		/* prev overwrites cur: */
 
-		if (mustfix_fsck_err_on(bpos_cmp(expected_start,
-						 cur->data->max_key) >= 0, c,
+		if (mustfix_fsck_err_on(bpos_ge(expected_start,
+						cur->data->max_key), c,
				"btree node overwritten by prev node at btree %s level %u:\n"
				"  prev %s\n"
				"  node %s",
@@ -312,7 +312,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 			goto out;
 		}
 
-		if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
+		if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
				"btree node with incorrect min_key at btree %s level %u:\n"
				"  prev %s\n"
				"  node %s",
@@ -336,7 +336,7 @@ static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
 	bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
 	bch2_bpos_to_text(&buf2, b->key.k.p);
 
-	if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
+	if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
			"btree node with incorrect max_key at btree %s level %u:\n"
			"  %s\n"
			"  expected %s",
@@ -374,8 +374,8 @@ again:
 	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
 
 	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 
 		bch2_btree_and_journal_iter_advance(&iter);
 		bch2_bkey_buf_reassemble(&cur_k, c, k);
@@ -912,8 +912,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
 	bkey_init(&prev.k->k);
 
 	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 
 		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
 				       false, &k, true);
@@ -1018,7 +1018,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
 	six_lock_read(&b->c.lock, NULL, NULL);
 	printbuf_reset(&buf);
 	bch2_bpos_to_text(&buf, b->data->min_key);
-	if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
+	if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
			"btree root with incorrect min_key: %s", buf.buf)) {
 		bch_err(c, "repair unimplemented");
 		ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1027,7 +1027,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
 
 	printbuf_reset(&buf);
 	bch2_bpos_to_text(&buf, b->data->max_key);
-	if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
+	if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
			"btree root with incorrect max_key: %s", buf.buf)) {
 		bch_err(c, "repair unimplemented");
 		ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1344,7 +1344,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	enum bch_data_type type;
 	int ret;
 
-	if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+	if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
 		return 1;
 
 	bch2_alloc_to_v4(k, &old);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 2f6595433b54..78448d99eb44 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -83,7 +83,7 @@ static void verify_no_dups(struct btree *b,
 		struct bkey l = bkey_unpack_key(b, p);
 		struct bkey r = bkey_unpack_key(b, k);
 
-		BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
+		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
 	}
 #endif
 }
@@ -653,8 +653,8 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 	bch2_btree_build_aux_trees(b);
 
 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
-		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 	}
 }
@@ -752,7 +752,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			b->data->max_key = b->key.k.p;
 		}
 
-		btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
+		btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
			     "incorrect min_key: got %s should be %s",
			     (printbuf_reset(&buf1),
@@ -761,7 +761,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
	}

-	btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
+	btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
		     BTREE_ERR_MUST_RETRY, c, ca, b, i,
		     "incorrect max key %s",
		     (printbuf_reset(&buf1),
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 4b1810ad7d91..a720dd74139b 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -201,7 +201,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 {
 	if (version < bcachefs_metadata_version_inode_btree_change &&
 	    btree_node_type_is_extents(btree_id) &&
-	    bpos_cmp(bn->min_key, POS_MIN) &&
+	    !bpos_eq(bn->min_key, POS_MIN) &&
 	    write)
 		bn->min_key = bpos_nosnap_predecessor(bn->min_key);
@@ -218,7 +218,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 
 	if (version < bcachefs_metadata_version_inode_btree_change &&
 	    btree_node_type_is_extents(btree_id) &&
-	    bpos_cmp(bn->min_key, POS_MIN) &&
+	    !bpos_eq(bn->min_key, POS_MIN) &&
 	    !write)
 		bn->min_key = bpos_nosnap_successor(bn->min_key);
 }
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 6af99edfe132..ddd22dfab45e 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -93,7 +93,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 	struct bpos pos = iter->pos;
 
 	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
-	    bkey_cmp(pos, POS_MAX))
+	    !bkey_eq(pos, POS_MAX))
 		pos = bkey_successor(iter, pos);
 	return pos;
 }
@@ -101,13 +101,13 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 static inline bool btree_path_pos_before_node(struct btree_path *path,
 					      struct btree *b)
 {
-	return bpos_cmp(path->pos, b->data->min_key) < 0;
+	return bpos_lt(path->pos, b->data->min_key);
 }
 
 static inline bool btree_path_pos_after_node(struct btree_path *path,
 					     struct btree *b)
 {
-	return bpos_cmp(b->key.k.p, path->pos) < 0;
+	return bpos_gt(path->pos, b->key.k.p);
 }
 
 static inline bool btree_path_pos_in_node(struct btree_path *path,
@@ -133,7 +133,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
 	ck = (void *) path->l[0].b;
 	BUG_ON(ck->key.btree_id != path->btree_id ||
-	       bkey_cmp(ck->key.pos, path->pos));
+	       !bkey_eq(ck->key.pos, path->pos));
 
 	if (!locked)
 		btree_node_unlock(trans, path, 0);
@@ -278,8 +278,8 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
 	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
 	       iter->pos.snapshot != iter->snapshot);
 
-	BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
-	       bkey_cmp(iter->pos, iter->k.p) > 0);
+	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
+	       bkey_gt(iter->pos, iter->k.p));
 }
 
 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
@@ -313,7 +313,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
 		if (ret)
 			goto out;
 
-		if (!bkey_cmp(prev.k->p, k.k->p) &&
+		if (bkey_eq(prev.k->p, k.k->p) &&
 		    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
 					      prev.k->p.snapshot) > 0) {
 			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
@@ -355,11 +355,11 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
 			continue;
 
 		if (!key_cache) {
-			if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
-			    bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
+			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
+			    bkey_le(pos, path->l[0].b->key.k.p))
 				return;
 		} else {
-			if (!bkey_cmp(pos, path->pos))
+			if (bkey_eq(pos, path->pos))
 				return;
 		}
 	}
@@ -1548,16 +1548,16 @@ struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *
 
 		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
 		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
 
-		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
+		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
 
-		if (!k.k || bpos_cmp(path->pos, k.k->p))
+		if (!k.k || !bpos_eq(path->pos, k.k->p))
 			goto hole;
 	} else {
 		struct bkey_cached *ck = (void *) path->l[0].b;
 
 		EBUG_ON(ck &&
 			(path->btree_id != ck->key.btree_id ||
-			 bkey_cmp(path->pos, ck->key.pos)));
+			 !bkey_eq(path->pos, ck->key.pos)));
 		EBUG_ON(!ck || !ck->valid);
 
 		*u = ck->k->k;
@@ -1615,7 +1615,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	if (!b)
 		goto out;
 
-	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
+	BUG_ON(bpos_lt(b->key.k.p, iter->pos));
 
 	bkey_init(&iter->k);
 	iter->k.p = iter->pos = b->key.k.p;
@@ -1666,7 +1666,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 
 	b = btree_path_node(path, path->level + 1);
 
-	if (!bpos_cmp(iter->pos, b->key.k.p)) {
+	if (bpos_eq(iter->pos, b->key.k.p)) {
 		__btree_path_set_level_up(trans, path, path->level++);
 	} else {
 		/*
@@ -1709,9 +1709,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 {
 	if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
 		struct bpos pos = iter->k.p;
-		bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
-			    ? bpos_cmp(pos, SPOS_MAX)
-			    : bkey_cmp(pos, SPOS_MAX)) != 0;
+		bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+			     ? bpos_eq(pos, SPOS_MAX)
+			     : bkey_eq(pos, SPOS_MAX));
 
 		if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
 			pos = bkey_successor(iter, pos);
@@ -1729,9 +1729,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 {
 	struct bpos pos = bkey_start_pos(&iter->k);
-	bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
-		    ? bpos_cmp(pos, POS_MIN)
-		    : bkey_cmp(pos, POS_MIN)) != 0;
+	bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+		     ? bpos_eq(pos, POS_MIN)
+		     : bkey_eq(pos, POS_MIN));
 
 	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
 		pos = bkey_predecessor(iter, pos);
@@ -1750,11 +1750,11 @@ struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
 			continue;
 		if (i->btree_id > iter->btree_id)
 			break;
-		if (bpos_cmp(i->k->k.p, iter->path->pos) < 0)
+		if (bpos_lt(i->k->k.p, iter->path->pos))
 			continue;
 		if (i->key_cache_already_flushed)
 			continue;
-		if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
+		if (!ret || bpos_lt(i->k->k.p, ret->k.p))
 			ret = i->k;
 	}
 
@@ -1774,7 +1774,7 @@ struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
 {
 	struct bkey_i *k;
 
-	if (bpos_cmp(iter->path->pos, iter->journal_pos) < 0)
+	if (bpos_lt(iter->path->pos, iter->journal_pos))
 		iter->journal_idx = 0;
 
 	k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
@@ -1913,8 +1913,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 		next_update = btree_trans_peek_updates(iter);
 
 		if (next_update &&
-		    bpos_cmp(next_update->k.p,
-			     k.k ? k.k->p : l->b->key.k.p) <= 0) {
+		    bpos_le(next_update->k.p,
+			    k.k ? k.k->p : l->b->key.k.p)) {
 			iter->k = next_update->k;
 			k = bkey_i_to_s_c(next_update);
 		}
@@ -1927,7 +1927,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 			 * whiteout, with a real key at the same position, since
 			 * in the btree deleted keys sort before non deleted.
 			 */
-			search_key = bpos_cmp(search_key, k.k->p)
+			search_key = !bpos_eq(search_key, k.k->p)
 				? k.k->p
 				: bpos_successor(k.k->p);
 			continue;
@@ -1935,7 +1935,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 
 		if (likely(k.k)) {
 			break;
-		} else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) {
+		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
 			/* Advance to next leaf node: */
 			search_key = bpos_successor(l->b->key.k.p);
 		} else {
@@ -1985,19 +1985,19 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 		 */
 		if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
 			iter_pos = k.k->p;
-		else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+		else if (bkey_gt(bkey_start_pos(k.k), iter->pos))
 			iter_pos = bkey_start_pos(k.k);
 		else
 			iter_pos = iter->pos;
 
-		if (bkey_cmp(iter_pos, end) > 0) {
+		if (bkey_gt(iter_pos, end)) {
 			bch2_btree_iter_set_pos(iter, end);
 			k = bkey_s_c_null;
 			goto out_no_locked;
 		}
 
 		if (iter->update_path &&
-		    bkey_cmp(iter->update_path->pos, k.k->p)) {
+		    !bkey_eq(iter->update_path->pos, k.k->p)) {
 			bch2_path_put_nokeep(trans, iter->update_path,
 					     iter->flags & BTREE_ITER_INTENT);
 			iter->update_path = NULL;
@@ -2120,7 +2120,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 		/* Check if we should go up to the parent node: */
 		if (!k.k ||
 		    (iter->advanced &&
-		     !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
+		     bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
 			iter->pos = path_l(iter->path)->b->key.k.p;
 			btree_path_set_level_up(trans, iter->path);
 			iter->advanced = false;
@@ -2136,7 +2136,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 		if (iter->path->level != iter->min_depth &&
 		    (iter->advanced ||
 		     !k.k ||
-		     bpos_cmp(iter->pos, k.k->p))) {
+		     !bpos_eq(iter->pos, k.k->p))) {
 			btree_path_set_level_down(trans, iter->path, iter->min_depth);
 			iter->pos = bpos_successor(iter->pos);
 			iter->advanced = false;
@@ -2147,7 +2147,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 		if (iter->path->level == iter->min_depth &&
 		    iter->advanced &&
 		    k.k &&
-		    !bpos_cmp(iter->pos, k.k->p)) {
+		    bpos_eq(iter->pos, k.k->p)) {
 			iter->pos = bpos_successor(iter->pos);
 			iter->advanced = false;
 			continue;
@@ -2155,7 +2155,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 
 		if (iter->advanced &&
 		    iter->path->level == iter->min_depth &&
-		    bpos_cmp(k.k->p, iter->pos))
+		    !bpos_eq(k.k->p, iter->pos))
 			iter->advanced = false;
 
 		BUG_ON(iter->advanced);
@@ -2225,8 +2225,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 					  &iter->path->l[0], &iter->k);
 		if (!k.k ||
 		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
-		     ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
-		     : bpos_cmp(k.k->p, search_key) > 0))
+		     ? bpos_ge(bkey_start_pos(k.k), search_key)
+		     : bpos_gt(k.k->p, search_key)))
 			k = btree_path_level_prev(trans, iter->path,
 						  &iter->path->l[0], &iter->k);
 
@@ -2240,7 +2240,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 				 * longer at the same _key_ (not pos), return
 				 * that candidate
 				 */
-				if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
+				if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
 					bch2_path_put_nokeep(trans, iter->path,
 						      iter->flags & BTREE_ITER_INTENT);
 					iter->path = saved_path;
@@ -2275,7 +2275,7 @@ got_key:
 			}
 
 			break;
-		} else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
+		} else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
 			/* Advance to previous leaf node: */
 			search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
 		} else {
@@ -2286,10 +2286,10 @@ got_key:
 		}
 	}
 
-	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+	EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
 
 	/* Extents can straddle iter->pos: */
-	if (bkey_cmp(k.k->p, iter->pos) < 0)
+	if (bkey_lt(k.k->p, iter->pos))
 		iter->pos = k.k->p;
 
 	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
@@ -2354,7 +2354,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		struct bkey_i *next_update;
 
 		if ((next_update = btree_trans_peek_updates(iter)) &&
-		    !bpos_cmp(next_update->k.p, iter->pos)) {
+		    bpos_eq(next_update->k.p, iter->pos)) {
 			iter->k = next_update->k;
 			k = bkey_i_to_s_c(next_update);
 			goto out;
@@ -2410,7 +2410,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
 		next = k.k ? bkey_start_pos(k.k) : POS_MAX;
 
-		if (bkey_cmp(iter->pos, next) < 0) {
+		if (bkey_lt(iter->pos, next)) {
 			bkey_init(&iter->k);
 			iter->k.p = iter->pos;
 
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index c0344b5a40e9..a297a741b44e 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -407,7 +407,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
 	if (!(flags & BTREE_ITER_SLOTS))
 		return bch2_btree_iter_peek_upto(iter, end);
 
-	if (bkey_cmp(iter->pos, end) > 0)
+	if (bkey_gt(iter->pos, end))
 		return bkey_s_c_null;
 
 	return bch2_btree_iter_peek_slot(iter);
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 19cf2bf92962..33586fab5fa5 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -27,8 +27,8 @@ static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
 	const struct bkey_cached *ck = obj;
 	const struct bkey_cached_key *key = arg->key;
 
-	return cmp_int(ck->key.btree_id, key->btree_id) ?:
-		bpos_cmp(ck->key.pos, key->pos);
+	return ck->key.btree_id != key->btree_id ||
+		!bpos_eq(ck->key.pos, key->pos);
 }
 
 static const struct rhashtable_params bch2_btree_key_cache_params = {
@@ -475,7 +475,7 @@ retry:
 		BUG_ON(ret);
 
 		if (ck->key.btree_id != path->btree_id ||
-		    bpos_cmp(ck->key.pos, path->pos)) {
+		    !bpos_eq(ck->key.pos, path->pos)) {
 			six_unlock_type(&ck->c.lock, lock_want);
 			goto retry;
 		}
@@ -549,7 +549,7 @@ retry:
 		return ret;
 
 	if (ck->key.btree_id != path->btree_id ||
-	    bpos_cmp(ck->key.pos, path->pos)) {
+	    !bpos_eq(ck->key.pos, path->pos)) {
 		six_unlock_type(&ck->c.lock, lock_want);
 		goto retry;
 	}
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 762e83b0b06c..e75f12fb7a9b 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -71,7 +71,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
 			break;
 		bp = bkey_s_c_to_btree_ptr_v2(k);
 
-		if (bpos_cmp(next_node, bp.v->min_key)) {
+		if (!bpos_eq(next_node, bp.v->min_key)) {
 			bch2_dump_btree_node(c, b);
 			bch2_bpos_to_text(&buf1, next_node);
 			bch2_bpos_to_text(&buf2, bp.v->min_key);
@@ -81,7 +81,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
 		bch2_btree_node_iter_advance(&iter, b);
 
 		if (bch2_btree_node_iter_end(&iter)) {
-			if (bpos_cmp(k.k->p, b->key.k.p)) {
+			if (!bpos_eq(k.k->p, b->key.k.p)) {
 				bch2_dump_btree_node(c, b);
 				bch2_bpos_to_text(&buf1, b->key.k.p);
 				bch2_bpos_to_text(&buf2, k.k->p);
@@ -1327,7 +1327,7 @@ __bch2_btree_insert_keys_interior(struct btree_update *as,
 	while (!bch2_keylist_empty(keys)) {
 		struct bkey_i *k = bch2_keylist_front(keys);
 
-		if (bpos_cmp(k->k.p, b->key.k.p) > 0)
+		if (bpos_gt(k->k.p, b->key.k.p))
 			break;
 
 		bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
@@ -1444,8 +1444,7 @@ static void btree_split_insert_keys(struct btree_update *as,
 				    struct keylist *keys)
 {
 	if (!bch2_keylist_empty(keys) &&
-	    bpos_cmp(bch2_keylist_front(keys)->k.p,
-		     b->data->max_key) <= 0) {
+	    bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
 		struct btree_node_iter node_iter;
 
 		bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
@@ -1769,8 +1768,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 
 	b = path->l[level].b;
 
-	if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
-	    (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
+	if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
+	    (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
 		b->sib_u64s[sib] = U16_MAX;
 		return 0;
 	}
@@ -1803,7 +1802,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 		next = m;
 	}
 
-	if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
+	if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
 		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
 
 		bch2_bpos_to_text(&buf1, prev->data->max_key);
@@ -2096,7 +2095,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 					iter2.flags & BTREE_ITER_INTENT);
 
 		BUG_ON(iter2.path->level != b->c.level);
-		BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
+		BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
 
 		btree_path_set_level_up(trans, iter2.path);
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index df28dfb1d0a2..ecd63cc8e9cf 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -92,8 +92,8 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
 	EBUG_ON(btree_node_just_written(b));
 	EBUG_ON(bset_written(b, btree_bset_last(b)));
 	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
-	EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
-	EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
+	EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
+	EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
 	EBUG_ON(insert->k.u64s >
 		bch_btree_keys_u64s_remaining(trans->c, b));
 
@@ -257,7 +257,7 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
 static inline void btree_insert_entry_checks(struct btree_trans *trans,
 					     struct btree_insert_entry *i)
 {
-	BUG_ON(bpos_cmp(i->k->k.p, i->path->pos));
+	BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
 	BUG_ON(i->cached	!= i->path->cached);
 	BUG_ON(i->level		!= i->path->level);
 	BUG_ON(i->btree_id	!= i->path->btree_id);
@@ -1141,7 +1141,7 @@ static noinline int __check_pos_snapshot_overwritten(struct btree_trans *trans,
 		if (!k.k)
 			break;
 
-		if (bkey_cmp(pos, k.k->p))
+		if (!bkey_eq(pos, k.k->p))
 			break;
 
 		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
@@ -1242,7 +1242,7 @@ int bch2_trans_update_extent(struct btree_trans *trans,
 	if (!k.k)
 		goto out;
 
-	if (!bkey_cmp(k.k->p, bkey_start_pos(&insert->k))) {
+	if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
 		if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
 			ret = extent_front_merge(trans, &iter, k, &insert, flags);
 			if (ret)
@@ -1252,9 +1252,9 @@ int bch2_trans_update_extent(struct btree_trans *trans,
 		goto next;
 	}
 
-	while (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) > 0) {
-		bool front_split = bkey_cmp(bkey_start_pos(k.k), start) < 0;
-		bool back_split  = bkey_cmp(k.k->p, insert->k.p) > 0;
+	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
+		bool front_split = bkey_lt(bkey_start_pos(k.k), start);
+		bool back_split  = bkey_gt(k.k->p, insert->k.p);
 
 		/*
 		 * If we're going to be splitting a compressed extent, note it
@@ -1313,7 +1313,7 @@ int bch2_trans_update_extent(struct btree_trans *trans,
 			goto err;
 		}
 
-		if (bkey_cmp(k.k->p, insert->k.p) <= 0) {
+		if (bkey_le(k.k->p, insert->k.p)) {
 			update = bch2_trans_kmalloc(trans, sizeof(*update));
 			if ((ret = PTR_ERR_OR_ZERO(update)))
 				goto err;
@@ -1407,7 +1407,7 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
 	for_each_btree_key_norestart(trans, iter, btree_id, pos,
 				     BTREE_ITER_ALL_SNAPSHOTS|
 				     BTREE_ITER_NOPRESERVE, k, ret) {
-		if (bkey_cmp(k.k->p, pos))
+		if (!bkey_eq(k.k->p, pos))
 			break;
 
 		if (bch2_snapshot_is_ancestor(trans->c, snapshot,
@@ -1463,7 +1463,7 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
 	EBUG_ON(!path->should_be_locked);
 	EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
-	EBUG_ON(bpos_cmp(k->k.p, path->pos));
+	EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
 	n = (struct btree_insert_entry) {
 		.flags		= flags,
@@ -1571,7 +1571,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 	    btree_id_cached(trans->c, path->btree_id)) {
 		if (!iter->key_cache_path ||
 		    !iter->key_cache_path->should_be_locked ||
-		    bpos_cmp(iter->key_cache_path->pos, k->k.p)) {
+		    !bpos_eq(iter->key_cache_path->pos, k->k.p)) {
 			if (!iter->key_cache_path)
 				iter->key_cache_path =
 					bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
@@ -1680,7 +1680,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
 		if (ret)
 			goto err;
 
-		if (bkey_cmp(iter.pos, end) >= 0)
+		if (bkey_ge(iter.pos, end))
 			break;
 
 		bkey_init(&delete.k);
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index f200536d5385..5e7e3aee4817 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -31,7 +31,7 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
 
 	darray_init(&s);
 
-	if (!bkey_cmp(old_pos, new_pos))
+	if (bkey_eq(old_pos, new_pos))
 		return 0;
 
 	if (!snapshot_t(c, old_pos.snapshot)->children[0])
@@ -46,7 +46,7 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
 			if (ret)
 				break;
 
-			if (bkey_cmp(old_pos, k.k->p))
+			if (!bkey_eq(old_pos, k.k->p))
 				break;
 
 			if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
@@ -238,7 +238,7 @@ err:
 		if (ret)
 			break;
next:
-		while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
 			bch2_keylist_pop_front(keys);
 			if (bch2_keylist_empty(keys))
 				goto out;
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index ea53730fa64e..28d739c94aa3 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -307,7 +307,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 
-	if (!bpos_cmp(SPOS_MAX, i->from))
+	if (bpos_eq(SPOS_MAX, i->from))
 		return i->ret;
 
 	bch2_trans_init(&trans, i->c, 0, 0);
@@ -318,7 +318,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
 			break;
 
 		bch2_btree_node_to_text(&i->buf, i->c, b);
-		i->from = bpos_cmp(SPOS_MAX, b->key.k.p)
+		i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
 			? bpos_successor(b->key.k.p)
 			: b->key.k.p;
 	}
@@ -369,7 +369,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
 		if (ret)
 			break;
 
-		if (bpos_cmp(l->b->key.k.p, i->prev_node) > 0) {
+		if (bpos_gt(l->b->key.k.p, i->prev_node)) {
 			bch2_btree_node_to_text(&i->buf, i->c, l->b);
 			i->prev_node = l->b->key.k.p;
 		}
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index de528c3afe8c..f1838b7c45ee 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -350,8 +350,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
 			bkey_init(&new_src->k);
 			new_src->k.p = src_iter.pos;
 
-			if (bkey_cmp(dst_pos, src_iter.pos) <= 0 &&
-			    bkey_cmp(src_iter.pos, dst_iter.pos) < 0) {
+			if (bkey_le(dst_pos, src_iter.pos) &&
+			    bkey_lt(src_iter.pos, dst_iter.pos)) {
 				/*
 				 * We have a hash collision for the new dst key,
 				 * and new_src - the key we're deleting - is between
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index c9836a7b47c8..cab088bb5413 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -108,7 +108,7 @@ int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
 {
 	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
 
-	if (!bkey_cmp(k.k->p, POS_MIN)) {
+	if (bkey_eq(k.k->p, POS_MIN)) {
 		prt_printf(err, "stripe at POS_MIN");
 		return -BCH_ERR_invalid_bkey;
 	}
@@ -725,7 +725,7 @@ static int ec_stripe_bkey_insert(struct btree_trans *trans,
 	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
 			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-		if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
+		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
 			if (start_pos.offset) {
 				start_pos = min_pos;
 				bch2_btree_iter_set_pos(&iter, start_pos);
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index 2fd5d9672a44..21d6f88c7397 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -73,8 +73,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
 		for_each_btree_key_norestart(trans, iter,
 				BTREE_ID_reflink, POS(0, idx + offset),
 				BTREE_ITER_SLOTS, r_k, ret2) {
-			if (bkey_cmp(bkey_start_pos(r_k.k),
-				     POS(0, idx + sectors)) >= 0)
+			if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
 				break;
 
 			/* extent_update_to_keys(), for the reflink_v update */
@@ -132,11 +131,10 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
 	for_each_btree_key_continue_norestart(copy, 0, k, ret) {
 		unsigned offset = 0;
 
-		if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
+		if (bkey_ge(bkey_start_pos(k.k), *end))
 			break;
 
-		if (bkey_cmp(bkey_start_pos(&insert->k),
-			     bkey_start_pos(k.k)) > 0)
+		if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
 			offset = bkey_start_offset(&insert->k) -
 				bkey_start_offset(k.k);
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 795c4128c9d0..39a5b4560a05 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -235,7 +235,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
 
 	if (version < bcachefs_metadata_version_inode_btree_change &&
 	    btree_node_type_is_extents(btree_id) &&
-	    bkey_cmp(bp.v->min_key, POS_MIN))
+	    !bkey_eq(bp.v->min_key, POS_MIN))
 		bp.v->min_key = write
 			? bpos_nosnap_predecessor(bp.v->min_key)
 			: bpos_nosnap_successor(bp.v->min_key);
@@ -1222,10 +1222,10 @@ int bch2_cut_front_s(struct bpos where, struct bkey_s k)
 	int val_u64s_delta;
 	u64 sub;
 
-	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
+	if (bkey_le(where, bkey_start_pos(k.k)))
 		return 0;
 
-	EBUG_ON(bkey_cmp(where, k.k->p) > 0);
+	EBUG_ON(bkey_gt(where, k.k->p));
 
 	sub = where.offset - bkey_start_offset(k.k);
@@ -1302,10 +1302,10 @@ int bch2_cut_back_s(struct bpos where, struct bkey_s k)
 	int val_u64s_delta;
 	u64 len = 0;
 
-	if (bkey_cmp(where, k.k->p) >= 0)
+	if (bkey_ge(where, k.k->p))
 		return 0;
 
-	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
+	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
 
 	len = where.offset - bkey_start_offset(k.k);
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 9e72d5c76e9e..e27d39b728b3 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -676,9 +676,8 @@ enum bch_extent_overlap {
 static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
 							   const struct bkey *m)
 {
-	int cmp1 = bkey_cmp(k->p, m->p) < 0;
-	int cmp2 = bkey_cmp(bkey_start_pos(k),
-			    bkey_start_pos(m)) > 0;
+	int cmp1 = bkey_lt(k->p, m->p);
+	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));
 
 	return (cmp1 << 1) + cmp2;
 }
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 312d11f6a41e..88f1fa791090 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -2109,7 +2109,7 @@ retry:
 	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
 			   SPOS(inum.inum, offset, snapshot),
 			   BTREE_ITER_SLOTS, k, err) {
-		if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
+		if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
 			break;
 
 		if (k.k->p.snapshot != snapshot ||
@@ -2593,7 +2593,7 @@ retry:
 		goto err;
 
 	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
-		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
 
 		if (bkey_extent_is_data(k.k)) {
@@ -3031,13 +3031,13 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 			break;
 
 		if (insert &&
-		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
+		    bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
 			break;
reassemble:
 		bch2_bkey_buf_reassemble(&copy, c, k);
 
 		if (insert &&
-		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
+		    bkey_lt(bkey_start_pos(k.k), move_pos))
 			bch2_cut_front(move_pos, copy.k);
 
 		copy.k->k.p.offset += shift >> 9;
@@ -3047,7 +3047,7 @@ reassemble:
 		if (ret)
 			continue;
 
-		if (bkey_cmp(atomic_end, copy.k->k.p)) {
+		if (!bkey_eq(atomic_end, copy.k->k.p)) {
 			if (insert) {
 				move_pos = atomic_end;
 				move_pos.offset -= shift >> 9;
@@ -3125,7 +3125,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
 			POS(inode->v.i_ino, start_sector),
 			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
-	while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
+	while (!ret && bkey_lt(iter.pos, end_pos)) {
 		s64 i_sectors_delta = 0;
 		struct quota_res quota_res = { 0 };
 		struct bkey_s_c k;
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index ffc2671cece6..d8e6cbd8f7f2 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -133,7 +133,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
 	if (ret)
 		goto err;
 
-	if (!k.k || bkey_cmp(k.k->p, POS(0, inode_nr))) {
+	if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
 		ret = -ENOENT;
 		goto err;
 	}
@@ -527,7 +527,7 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
 	};
 	int ret = 0;
 
-	if (bkey_cmp(s->pos, pos))
+	if (!bkey_eq(s->pos, pos))
 		s->ids.nr = 0;
 
 	pos.snapshot = n.equiv;
@@ -825,7 +825,7 @@ static int hash_check_key(struct btree_trans *trans,
 	for_each_btree_key_norestart(trans, iter, desc.btree_id,
 				     POS(hash_k.k->p.inode, hash),
 				     BTREE_ITER_SLOTS, k, ret) {
-		if (!bkey_cmp(k.k->p, hash_k.k->p))
+		if (bkey_eq(k.k->p, hash_k.k->p))
 			break;
 
 		if (fsck_err_on(k.k->type == desc.key_type &&
@@ -1199,7 +1199,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 	BUG_ON(!iter->path->should_be_locked);
 #if 0
-	if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
+	if (bkey_gt(prev.k->k.p, bkey_start_pos(k.k))) {
 		char buf1[200];
 		char buf2[200];
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 848efbfbc65d..a7a99a18572a 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -657,7 +657,7 @@ int bch2_inode_create(struct btree_trans *trans,
again:
 	while ((k = bch2_btree_iter_peek(iter)).k &&
 	       !(ret = bkey_err(k)) &&
-	       bkey_cmp(k.k->p, POS(0, max)) < 0) {
+	       bkey_lt(k.k->p, POS(0, max))) {
 		while (pos < iter->pos.offset) {
 			if (!bch2_btree_key_cache_find(c, BTREE_ID_inodes, POS(0, pos)))
 				goto found_slot;
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 66b17cc6fc78..1341b7cd9fbf 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -236,7 +236,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
 		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
 			*usage_increasing = true;
 
-		if (bkey_cmp(old.k->p, new->k.p) >= 0)
+		if (bkey_ge(old.k->p, new->k.p))
 			break;
 	}
 
@@ -536,7 +536,7 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
 		bch2_btree_iter_set_snapshot(iter, snapshot);
 
 		k = bch2_btree_iter_peek(iter);
-		if (bkey_cmp(iter->pos, end_pos) >= 0) {
+		if (bkey_ge(iter->pos, end_pos)) {
 			bch2_btree_iter_set_pos(iter, end_pos);
 			break;
 		}
@@ -630,7 +630,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
 		if (ret)
 			break;
 
-		if (bkey_cmp(iter.pos, k->k.p) >= 0)
+		if (bkey_ge(iter.pos, k->k.p))
 			bch2_keylist_pop_front(&op->insert_keys);
 		else
 			bch2_cut_front(iter.pos, k);
@@ -1373,7 +1373,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
 				     bkey_start_pos(&orig->k), BTREE_ITER_INTENT, k,
 				     NULL, NULL, BTREE_INSERT_NOFAIL, ({
-			if (bkey_cmp(bkey_start_pos(k.k), orig->k.p) >= 0)
+			if (bkey_ge(bkey_start_pos(k.k), orig->k.p))
 				break;
 
 			bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
@@ -1749,7 +1749,7 @@ void bch2_write(struct closure *cl)
 	BUG_ON(!op->nr_replicas);
 	BUG_ON(!op->write_point.v);
-	BUG_ON(!bkey_cmp(op->pos, POS_MAX));
+	BUG_ON(bkey_eq(op->pos, POS_MAX));
 
 	op->start_time = local_clock();
 	bch2_keylist_init(&op->insert_keys, op->inline_keys);
diff --git a/fs/bcachefs/keylist.c b/fs/bcachefs/keylist.c
index 5e85055b0f93..29e51bde8313 100644
--- a/fs/bcachefs/keylist.c
+++ b/fs/bcachefs/keylist.c
@@ -36,7 +36,7 @@ void bch2_keylist_add_in_order(struct keylist *l, struct bkey_i *insert)
 	struct bkey_i *where;
 
 	for_each_keylist_key(l, where)
-		if (bkey_cmp(insert->k.p, where->k.p) < 0)
+		if (bpos_lt(insert->k.p, where->k.p))
 			break;
 
 	memmove_u64s_up((u64 *) where + insert->k.u64s,
@@ -63,6 +63,6 @@ void bch2_verify_keylist_sorted(struct keylist *l)
 
 	for_each_keylist_key(l, k)
 		BUG_ON(bkey_next(k) != l->top &&
-		       bpos_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
+		       bpos_ge(k->k.p, bkey_next(k)->k.p));
 }
 #endif
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 700f847c395c..3f003b6b92b8 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -361,7 +361,7 @@ static int lookup_inode(struct btree_trans *trans, struct bpos pos,
 	if (ret)
 		goto err;
 
-	if (!k.k || bkey_cmp(k.k->p, pos)) {
+	if (!k.k || !bkey_eq(k.k->p, pos)) {
 		ret = -ENOENT;
 		goto err;
 	}
@@ -492,7 +492,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (ret)
 			break;
 
-		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
 
 		ctxt->stats->pos = iter.pos;
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index fdcd70e8eb1f..7906e817db6e 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -133,9 +133,8 @@ search:
 	       (k = idx_to_key(keys, *idx),
 		k->btree_id == btree_id &&
 		k->level == level &&
-		bpos_cmp(k->k->k.p, end_pos) <= 0)) {
-		if (bpos_cmp(k->k->k.p, pos) >= 0 &&
-		    !k->overwritten)
+		bpos_le(k->k->k.p, end_pos))) {
+		if (bpos_ge(k->k->k.p, pos) && !k->overwritten)
 			return k->k;
 
 		(*idx)++;
@@ -296,7 +295,7 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
 	if (idx < keys->size &&
 	    keys->d[idx].btree_id	== btree &&
 	    keys->d[idx].level		== level &&
-	    !bpos_cmp(keys->d[idx].k->k.p, pos))
+	    bpos_eq(keys->d[idx].k->k.p, pos))
 		keys->d[idx].overwritten = true;
 }
@@ -355,7 +354,7 @@ static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter
 
 void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
 {
-	if (!bpos_cmp(iter->pos, SPOS_MAX))
+	if (bpos_eq(iter->pos, SPOS_MAX))
 		iter->at_end = true;
 	else
 		iter->pos = bpos_successor(iter->pos);
@@ -369,19 +368,19 @@ again:
 		return bkey_s_c_null;
 
 	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
-	       bpos_cmp(btree_k.k->p, iter->pos) < 0)
+	       bpos_lt(btree_k.k->p, iter->pos))
 		bch2_journal_iter_advance_btree(iter);
 
 	while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
-	       bpos_cmp(journal_k.k->p, iter->pos) < 0)
+	       bpos_lt(journal_k.k->p, iter->pos))
 		bch2_journal_iter_advance(&iter->journal);
 
 	ret = journal_k.k &&
-		(!btree_k.k || bpos_cmp(journal_k.k->p, btree_k.k->p) <= 0)
+		(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
 		? journal_k
 		: btree_k;
 
-	if (ret.k && iter->b && bpos_cmp(ret.k->p, iter->b->data->max_key) > 0)
+	if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
 		ret = bkey_s_c_null;
 
 	if (ret.k) {
@@ -529,7 +528,7 @@ static int journal_keys_sort(struct bch_fs *c)
 		while (src + 1 < keys->d + keys->nr &&
 		       src[0].btree_id	== src[1].btree_id &&
 		       src[0].level	== src[1].level &&
-		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
+		       bpos_eq(src[0].k->k.p, src[1].k->k.p))
 			src++;
 
 		*dst++ = *src++;
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 45bdb69e58fe..eed4ad38b21d 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -252,7 +252,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
 	int ret;
 
 	for_each_btree_key_continue_norestart(*iter, 0, k, ret) {
-		if (bkey_cmp(iter->pos, end) >= 0)
+		if (bkey_ge(iter->pos, end))
 			break;
 
 		if (bkey_extent_is_unwritten(k))
@@ -262,7 +262,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
 		return k;
 	}
 
-	if (bkey_cmp(iter->pos, end) >= 0)
+	if (bkey_ge(iter->pos, end))
 		bch2_btree_iter_set_pos(iter, end);
 
 	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
 }
@@ -304,7 +304,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 
 	while ((ret == 0 ||
 		bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
-	       bkey_cmp(dst_iter.pos, dst_end) < 0) {
+	       bkey_lt(dst_iter.pos, dst_end)) {
 		struct disk_reservation disk_res = { 0 };
 
 		bch2_trans_begin(&trans);
@@ -337,7 +337,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 		if (ret)
 			continue;
 
-		if (bkey_cmp(src_want, src_iter.pos) < 0) {
+		if (bkey_lt(src_want, src_iter.pos)) {
 			ret = bch2_fpunch_at(&trans, &dst_iter, dst_inum,
 					     min(dst_end.offset,
 						 dst_iter.pos.offset +
@@ -389,8 +389,8 @@ s64 bch2_remap_range(struct bch_fs *c,
 	bch2_trans_iter_exit(&trans, &dst_iter);
 	bch2_trans_iter_exit(&trans, &src_iter);
 
-	BUG_ON(!ret && bkey_cmp(dst_iter.pos, dst_end));
-	BUG_ON(bkey_cmp(dst_iter.pos, dst_end) > 0);
+	BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
+	BUG_ON(bkey_gt(dst_iter.pos, dst_end));
 
 	dst_done = dst_iter.pos.offset - dst_start.offset;
 	new_i_size = min(dst_iter.pos.offset << 9, new_i_size);
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index 7abbc5bc0a39..f19f6f8d3233 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -30,8 +30,8 @@ int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
 	struct bkey_s_c_snapshot s;
 	u32 i, id;
 
-	if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0 ||
-	    bkey_cmp(k.k->p, POS(0, 1)) < 0) {
+	if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+	    bkey_lt(k.k->p, POS(0, 1))) {
 		prt_printf(err, "bad pos");
 		return -BCH_ERR_invalid_bkey;
 	}
@@ -592,7 +592,7 @@ static int snapshot_delete_key(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
 
-	if (bkey_cmp(k.k->p, *last_pos))
+	if (!bkey_eq(k.k->p, *last_pos))
 		equiv_seen->nr = 0;
 	*last_pos = k.k->p;
@@ -770,8 +770,8 @@ static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
 			   int rw, struct printbuf *err)
 {
-	if (bkey_cmp(k.k->p, SUBVOL_POS_MIN) < 0 ||
-	    bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0) {
+	if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
+	    bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
 		prt_printf(err, "invalid pos");
 		return -BCH_ERR_invalid_bkey;
 	}
@@ -1028,7 +1028,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
 	for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
 			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-		if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
+		if (bkey_gt(k.k->p, SUBVOL_POS_MAX))
 			break;
 
 		/*