author    Kent Overstreet <kent.overstreet@gmail.com>    2019-11-16 16:25:58 -0500
committer Kent Overstreet <kent.overstreet@gmail.com>    2019-11-16 16:35:38 -0500
commit    98bc6e8ac8ee470ac349ed2b5f462b6ddfc81c18 (patch)
tree      e307a8887a4ec1caea2eb96c0690d7e331f4c498
parent    2d4eaf5e8faaa33c250693f7f748b87ff681205a (diff)
bcachefs: Reorganize extents.c

Group extents.c by key type - KEY_TYPE_btree_ptr, the extent checksum
entries, KEY_TYPE_extent and KEY_TYPE_reservation - followed by the
generic code for keys with pointers and the generic extent code
(bch2_cut_front_s()/bch2_cut_back_s()). This is almost entirely code
movement.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--  fs/bcachefs/extents.c  1319
-rw-r--r--  fs/bcachefs/extents.h     6
2 files changed, 653 insertions, 672 deletions
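
The functions shuffled below are ultimately reached through per-key-type
dispatch tables in extents.h (the .key_merge/.swab initializers visible in
the second file of this patch), which is what makes grouping them by key
type pay off. A minimal, standalone C sketch of that dispatch pattern -
every name here is a hypothetical stand-in, not the real bkey_ops layout:

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the per-key-type hooks grouped in extents.c: */
	typedef const char *(*invalid_fn)(int nr_replicas);

	static const char *btree_ptr_invalid(int nr)	{ (void) nr; return NULL; }
	static const char *extent_invalid(int nr)	{ (void) nr; return NULL; }
	static const char *reservation_invalid(int nr)
	{
		/* mirrors the !nr_replicas check in bch2_reservation_invalid() */
		return !nr ? "invalid nr_replicas" : NULL;
	}

	/* One ops table per key type, analogous to the bkey_ops initializers: */
	struct key_ops {
		const char	*name;
		invalid_fn	 invalid;
	};

	static const struct key_ops ops[] = {
		{ "btree_ptr",   btree_ptr_invalid },
		{ "extent",      extent_invalid },
		{ "reservation", reservation_invalid },
	};

	int main(void)
	{
		for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
			const char *err = ops[i].invalid(0);

			printf("%-11s %s\n", ops[i].name, err ? err : "ok");
		}
		return 0;
	}

Compiled with any C99 compiler this prints one line per key type; the real
tables additionally carry debugcheck, to_text and swab hooks.
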
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 2f1d4634ea09..a3c2b7566aa2 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -25,82 +25,6 @@
#include <trace/events/bcachefs.h>
-unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
-{
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
- unsigned nr_ptrs = 0;
-
- bkey_for_each_ptr(p, ptr)
- nr_ptrs++;
-
- return nr_ptrs;
-}
-
-unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
-{
- unsigned nr_ptrs = 0;
-
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v: {
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
-
- bkey_for_each_ptr(p, ptr)
- nr_ptrs += !ptr->cached;
- BUG_ON(!nr_ptrs);
- break;
- }
- case KEY_TYPE_reservation:
- nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
- break;
- }
-
- return nr_ptrs;
-}
-
-static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
- struct extent_ptr_decoded p)
-{
- unsigned durability = 0;
- struct bch_dev *ca;
-
- if (p.ptr.cached)
- return 0;
-
- ca = bch_dev_bkey_exists(c, p.ptr.dev);
-
- if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
- durability = max_t(unsigned, durability, ca->mi.durability);
-
- if (p.has_ec) {
- struct stripe *s =
- genradix_ptr(&c->stripes[0], p.ec.idx);
-
- if (WARN_ON(!s))
- goto out;
-
- durability = max_t(unsigned, durability, s->nr_redundant);
- }
-out:
- return durability;
-}
-
-unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned durability = 0;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- durability += bch2_extent_ptr_durability(c, p);
-
- return durability;
-}
-
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
unsigned dev)
{
@@ -219,172 +143,76 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
return ret;
}
-void bch2_bkey_append_ptr(struct bkey_i *k,
- struct bch_extent_ptr ptr)
-{
- EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
+/* KEY_TYPE_btree_ptr: */
- switch (k->k.type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_extent:
- EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
-
- ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
-
- memcpy((void *) &k->v + bkey_val_bytes(&k->k),
- &ptr,
- sizeof(ptr));
- k->u64s++;
- break;
- default:
- BUG();
- }
-}
-
-void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
-{
- struct bch_extent_ptr *ptr;
-
- bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
-}
-
-const struct bch_extent_ptr *
-bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
+const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
-
- bkey_for_each_ptr(ptrs, ptr)
- if (ptr->dev == dev)
- return ptr;
+ if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
+ return "value too big";
- return NULL;
+ return bch2_bkey_ptrs_invalid(c, k);
}
-bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
+void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
+ const char *err;
+ char buf[160];
+ struct bucket_mark mark;
+ struct bch_dev *ca;
- bkey_for_each_ptr(ptrs, ptr)
- if (bch2_dev_in_target(c, ptr->dev, target) &&
- (!ptr->cached ||
- !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
- return true;
-
- return false;
-}
-
-/* extent specific utility code */
-
-const struct bch_extent_ptr *
-bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
-{
- const struct bch_extent_ptr *ptr;
+ bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
+ !bch2_bkey_replicas_marked(c, k, false), c,
+ "btree key bad (replicas not marked in superblock):\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
- extent_for_each_ptr(e, ptr)
- if (ptr->dev == dev)
- return ptr;
+ if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
+ return;
- return NULL;
-}
+ bkey_for_each_ptr(ptrs, ptr) {
+ ca = bch_dev_bkey_exists(c, ptr->dev);
-const struct bch_extent_ptr *
-bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
-{
- const struct bch_extent_ptr *ptr;
+ mark = ptr_bucket_mark(ca, ptr);
- extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ err = "stale";
+ if (gen_after(mark.gen, ptr->gen))
+ goto err;
- if (ca->mi.group &&
- ca->mi.group - 1 == group)
- return ptr;
+ err = "inconsistent";
+ if (mark.data_type != BCH_DATA_BTREE ||
+ mark.dirty_sectors < c->opts.btree_node_size)
+ goto err;
}
- return NULL;
-}
-
-unsigned bch2_extent_is_compressed(struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned ret = 0;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (!p.ptr.cached &&
- p.crc.compression_type != BCH_COMPRESSION_NONE)
- ret += p.crc.compressed_size;
-
- return ret;
+ return;
+err:
+ bch2_bkey_val_to_text(&PBUF(buf), c, k);
+ bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
+ err, buf, PTR_BUCKET_NR(ca, ptr),
+ mark.gen, (unsigned) mark.v.counter);
}
-bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
- struct bch_extent_ptr m, u64 offset)
+void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (p.ptr.dev == m.dev &&
- p.ptr.gen == m.gen &&
- (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
- (s64) m.offset - offset)
- return true;
-
- return false;
+ bch2_bkey_ptrs_to_text(out, c, k);
}
-static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
- union bch_extent_entry *entry)
-{
- union bch_extent_entry *i = ptrs.start;
+/* Extent checksum entries: */
- if (i == entry)
- return NULL;
-
- while (extent_entry_next(i) != entry)
- i = extent_entry_next(i);
- return i;
-}
-
-union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
- struct bch_extent_ptr *ptr)
+/* returns true if not equal */
+static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
+ struct bch_extent_crc_unpacked r)
{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *dst, *src, *prev;
- bool drop_crc = true;
-
- EBUG_ON(ptr < &ptrs.start->ptr ||
- ptr >= &ptrs.end->ptr);
- EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
-
- src = extent_entry_next(to_entry(ptr));
- if (src != ptrs.end &&
- !extent_entry_is_crc(src))
- drop_crc = false;
-
- dst = to_entry(ptr);
- while ((prev = extent_entry_prev(ptrs, dst))) {
- if (extent_entry_is_ptr(prev))
- break;
-
- if (extent_entry_is_crc(prev)) {
- if (drop_crc)
- dst = prev;
- break;
- }
-
- dst = prev;
- }
-
- memmove_u64s_down(dst, src,
- (u64 *) ptrs.end - (u64 *) src);
- k.k->u64s -= (u64 *) src - (u64 *) dst;
-
- return dst;
+ return (l.csum_type != r.csum_type ||
+ l.compression_type != r.compression_type ||
+ l.compressed_size != r.compressed_size ||
+ l.uncompressed_size != r.uncompressed_size ||
+ l.offset != r.offset ||
+ l.live_size != r.live_size ||
+ l.nonce != r.nonce ||
+ bch2_crc_cmp(l.csum, r.csum));
}
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
@@ -463,20 +291,345 @@ restart_narrow_pointers:
return ret;
}
-/* returns true if not equal */
-static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
- struct bch_extent_crc_unpacked r)
+static unsigned bch2_crc_field_size_max[] = {
+ [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
+};
+
+static void bch2_extent_crc_pack(union bch_extent_crc *dst,
+ struct bch_extent_crc_unpacked src,
+ enum bch_extent_entry_type type)
{
- return (l.csum_type != r.csum_type ||
- l.compression_type != r.compression_type ||
- l.compressed_size != r.compressed_size ||
- l.uncompressed_size != r.uncompressed_size ||
- l.offset != r.offset ||
- l.live_size != r.live_size ||
- l.nonce != r.nonce ||
- bch2_crc_cmp(l.csum, r.csum));
+#define set_common_fields(_dst, _src) \
+ _dst.type = 1 << type; \
+ _dst.csum_type = _src.csum_type, \
+ _dst.compression_type = _src.compression_type, \
+ _dst._compressed_size = _src.compressed_size - 1, \
+ _dst._uncompressed_size = _src.uncompressed_size - 1, \
+ _dst.offset = _src.offset
+
+ switch (type) {
+ case BCH_EXTENT_ENTRY_crc32:
+ set_common_fields(dst->crc32, src);
+ dst->crc32.csum = *((__le32 *) &src.csum.lo);
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ set_common_fields(dst->crc64, src);
+ dst->crc64.nonce = src.nonce;
+ dst->crc64.csum_lo = src.csum.lo;
+ dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ set_common_fields(dst->crc128, src);
+ dst->crc128.nonce = src.nonce;
+ dst->crc128.csum = src.csum;
+ break;
+ default:
+ BUG();
+ }
+#undef set_common_fields
}
+void bch2_extent_crc_append(struct bkey_i *k,
+ struct bch_extent_crc_unpacked new)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ union bch_extent_crc *crc = (void *) ptrs.end;
+ enum bch_extent_entry_type type;
+
+ if (bch_crc_bytes[new.csum_type] <= 4 &&
+ new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
+ new.nonce <= CRC32_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc32;
+ else if (bch_crc_bytes[new.csum_type] <= 10 &&
+ new.uncompressed_size - 1 <= CRC64_SIZE_MAX &&
+ new.nonce <= CRC64_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc64;
+ else if (bch_crc_bytes[new.csum_type] <= 16 &&
+ new.uncompressed_size - 1 <= CRC128_SIZE_MAX &&
+ new.nonce <= CRC128_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc128;
+ else
+ BUG();
+
+ bch2_extent_crc_pack(crc, new, type);
+
+ k->k.u64s += extent_entry_u64s(ptrs.end);
+
+ EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
+}
+
+/* What does this code go under? */
+
+static inline void __extent_entry_insert(struct bkey_i *k,
+ union bch_extent_entry *dst,
+ union bch_extent_entry *new)
+{
+ union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
+
+ memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
+ dst, (u64 *) end - (u64 *) dst);
+ k->k.u64s += extent_entry_u64s(new);
+ memcpy(dst, new, extent_entry_bytes(new));
+}
+
+void bch2_extent_ptr_decoded_append(struct bkey_i *k,
+ struct extent_ptr_decoded *p)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ struct bch_extent_crc_unpacked crc =
+ bch2_extent_crc_unpack(&k->k, NULL);
+ union bch_extent_entry *pos;
+
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = ptrs.start;
+ goto found;
+ }
+
+ bkey_for_each_crc(&k->k, ptrs, crc, pos)
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = extent_entry_next(pos);
+ goto found;
+ }
+
+ bch2_extent_crc_append(k, p->crc);
+ pos = bkey_val_end(bkey_i_to_s(k));
+found:
+ p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ptr));
+
+ if (p->has_ec) {
+ p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ec));
+ }
+}
+
+/* KEY_TYPE_extent: */
+
+const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
+{
+ return bch2_bkey_ptrs_invalid(c, k);
+}
+
+void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ char buf[160];
+
+ /*
+ * XXX: we should be doing most/all of these checks at startup time,
+ * where we check bch2_bkey_invalid() in btree_node_read_done()
+ *
+ * But note that we can't check for stale pointers or incorrect gc marks
+ * until after journal replay is done (it might be an extent that's
+ * going to get overwritten during replay)
+ */
+
+ if (percpu_down_read_trylock(&c->mark_lock)) {
+ bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
+ !bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
+ "extent key bad (replicas not marked in superblock):\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
+ percpu_up_read(&c->mark_lock);
+ }
+ /*
+ * If journal replay hasn't finished, we might be seeing keys
+ * that will be overwritten by the time journal replay is done:
+ */
+ if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+ return;
+
+ extent_for_each_ptr_decode(e, p, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
+ unsigned stale = gen_after(mark.gen, p.ptr.gen);
+ unsigned disk_sectors = ptr_disk_sectors(p);
+ unsigned mark_sectors = p.ptr.cached
+ ? mark.cached_sectors
+ : mark.dirty_sectors;
+
+ bch2_fs_bug_on(stale && !p.ptr.cached, c,
+ "stale dirty pointer (ptr gen %u bucket %u",
+ p.ptr.gen, mark.gen);
+
+ bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
+
+ bch2_fs_bug_on(!stale &&
+ (mark.data_type != BCH_DATA_USER ||
+ mark_sectors < disk_sectors), c,
+ "extent pointer not marked: %s:\n"
+ "type %u sectors %u < %u",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
+ mark.data_type,
+ mark_sectors, disk_sectors);
+ }
+}
+
+void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_ptrs_to_text(out, c, k);
+}
+
+enum merge_result bch2_extent_merge(struct bch_fs *c,
+ struct bkey_s _l, struct bkey_s _r)
+{
+ struct bkey_s_extent l = bkey_s_to_extent(_l);
+ struct bkey_s_extent r = bkey_s_to_extent(_r);
+ union bch_extent_entry *en_l = l.v->start;
+ union bch_extent_entry *en_r = r.v->start;
+ struct bch_extent_crc_unpacked crc_l, crc_r;
+
+ if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
+ return BCH_MERGE_NOMERGE;
+
+ crc_l = bch2_extent_crc_unpack(l.k, NULL);
+
+ extent_for_each_entry(l, en_l) {
+ en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
+
+ if (extent_entry_type(en_l) != extent_entry_type(en_r))
+ return BCH_MERGE_NOMERGE;
+
+ switch (extent_entry_type(en_l)) {
+ case BCH_EXTENT_ENTRY_ptr: {
+ const struct bch_extent_ptr *lp = &en_l->ptr;
+ const struct bch_extent_ptr *rp = &en_r->ptr;
+ struct bch_dev *ca;
+
+ if (lp->offset + crc_l.compressed_size != rp->offset ||
+ lp->dev != rp->dev ||
+ lp->gen != rp->gen)
+ return BCH_MERGE_NOMERGE;
+
+ /* We don't allow extents to straddle buckets: */
+ ca = bch_dev_bkey_exists(c, lp->dev);
+
+ if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
+ return BCH_MERGE_NOMERGE;
+
+ break;
+ }
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
+ en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
+ return BCH_MERGE_NOMERGE;
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+
+ if (crc_l.csum_type != crc_r.csum_type ||
+ crc_l.compression_type != crc_r.compression_type ||
+ crc_l.nonce != crc_r.nonce)
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
+ crc_r.offset)
+ return BCH_MERGE_NOMERGE;
+
+ if (!bch2_checksum_mergeable(crc_l.csum_type))
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.compression_type)
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.csum_type &&
+ crc_l.uncompressed_size +
+ crc_r.uncompressed_size > c->sb.encoded_extent_max)
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.uncompressed_size + crc_r.uncompressed_size - 1 >
+ bch2_crc_field_size_max[extent_entry_type(en_l)])
+ return BCH_MERGE_NOMERGE;
+
+ break;
+ default:
+ return BCH_MERGE_NOMERGE;
+ }
+ }
+
+ extent_for_each_entry(l, en_l) {
+ struct bch_extent_crc_unpacked crc_l, crc_r;
+
+ en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
+
+ if (!extent_entry_is_crc(en_l))
+ continue;
+
+ crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+
+ crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
+ crc_l.csum,
+ crc_r.csum,
+ crc_r.uncompressed_size << 9);
+
+ crc_l.uncompressed_size += crc_r.uncompressed_size;
+ crc_l.compressed_size += crc_r.compressed_size;
+
+ bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
+ extent_entry_type(en_l));
+ }
+
+ bch2_key_resize(l.k, l.k->size + r.k->size);
+
+ return BCH_MERGE_MERGE;
+}
+
+/* KEY_TYPE_reservation: */
+
+const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+
+ if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
+ return "incorrect value size";
+
+ if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
+ return "invalid nr_replicas";
+
+ return NULL;
+}
+
+void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+
+ pr_buf(out, "generation %u replicas %u",
+ le32_to_cpu(r.v->generation),
+ r.v->nr_replicas);
+}
+
+enum merge_result bch2_reservation_merge(struct bch_fs *c,
+ struct bkey_s _l, struct bkey_s _r)
+{
+ struct bkey_s_reservation l = bkey_s_to_reservation(_l);
+ struct bkey_s_reservation r = bkey_s_to_reservation(_r);
+
+ if (l.v->generation != r.v->generation ||
+ l.v->nr_replicas != r.v->nr_replicas)
+ return BCH_MERGE_NOMERGE;
+
+ if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
+ bch2_key_resize(l.k, KEY_SIZE_MAX);
+ bch2_cut_front_s(l.k->p, r.s);
+ return BCH_MERGE_PARTIAL;
+ }
+
+ bch2_key_resize(l.k, l.k->size + r.k->size);
+
+ return BCH_MERGE_MERGE;
+}
+
+/* Generic code for keys with pointers: */
+
void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
{
union bch_extent_entry *entry;
@@ -659,356 +812,80 @@ const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
return NULL;
}
-/* Btree ptrs */
-
-const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
-{
- if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
- return "value too big";
-
- return bch2_bkey_ptrs_invalid(c, k);
-}
-
-void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
+unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
- const char *err;
- char buf[160];
- struct bucket_mark mark;
- struct bch_dev *ca;
-
- bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !bch2_bkey_replicas_marked(c, k, false), c,
- "btree key bad (replicas not marked in superblock):\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
-
- if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
- return;
-
- bkey_for_each_ptr(ptrs, ptr) {
- ca = bch_dev_bkey_exists(c, ptr->dev);
-
- mark = ptr_bucket_mark(ca, ptr);
-
- err = "stale";
- if (gen_after(mark.gen, ptr->gen))
- goto err;
-
- err = "inconsistent";
- if (mark.data_type != BCH_DATA_BTREE ||
- mark.dirty_sectors < c->opts.btree_node_size)
- goto err;
- }
+ unsigned nr_ptrs = 0;
- return;
-err:
- bch2_bkey_val_to_text(&PBUF(buf), c, k);
- bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
- err, buf, PTR_BUCKET_NR(ca, ptr),
- mark.gen, (unsigned) mark.v.counter);
-}
+ bkey_for_each_ptr(p, ptr)
+ nr_ptrs++;
-void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- bch2_bkey_ptrs_to_text(out, c, k);
+ return nr_ptrs;
}
-/* Extents */
-
-int bch2_cut_front_s(struct bpos where, struct bkey_s k)
+unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
{
- unsigned new_val_u64s = bkey_val_u64s(k.k);
- int val_u64s_delta;
- u64 sub;
-
- if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
- return 0;
-
- EBUG_ON(bkey_cmp(where, k.k->p) > 0);
-
- sub = where.offset - bkey_start_offset(k.k);
-
- k.k->size -= sub;
-
- if (!k.k->size) {
- k.k->type = KEY_TYPE_deleted;
- new_val_u64s = 0;
- }
+ unsigned nr_ptrs = 0;
switch (k.k->type) {
+ case KEY_TYPE_btree_ptr:
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v: {
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *entry;
- bool seen_crc = false;
-
- bkey_extent_entry_for_each(ptrs, entry) {
- switch (extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- if (!seen_crc)
- entry->ptr.offset += sub;
- break;
- case BCH_EXTENT_ENTRY_crc32:
- entry->crc32.offset += sub;
- break;
- case BCH_EXTENT_ENTRY_crc64:
- entry->crc64.offset += sub;
- break;
- case BCH_EXTENT_ENTRY_crc128:
- entry->crc128.offset += sub;
- break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- break;
- }
-
- if (extent_entry_is_crc(entry))
- seen_crc = true;
- }
-
- break;
- }
- case KEY_TYPE_reflink_p: {
- struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
- le64_add_cpu(&p.v->idx, sub);
+ bkey_for_each_ptr(p, ptr)
+ nr_ptrs += !ptr->cached;
+ BUG_ON(!nr_ptrs);
break;
}
- case KEY_TYPE_inline_data: {
- struct bkey_s_inline_data d = bkey_s_to_inline_data(k);
-
- sub = min_t(u64, sub << 9, bkey_val_bytes(d.k));
-
- memmove(d.v->data,
- d.v->data + sub,
- bkey_val_bytes(d.k) - sub);
-
- new_val_u64s -= sub >> 3;
+ case KEY_TYPE_reservation:
+ nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
break;
}
- }
-
- val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
- BUG_ON(val_u64s_delta < 0);
- set_bkey_val_u64s(k.k, new_val_u64s);
- memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
- return -val_u64s_delta;
+ return nr_ptrs;
}
-int bch2_cut_back_s(struct bpos where, struct bkey_s k)
+static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
+ struct extent_ptr_decoded p)
{
- unsigned new_val_u64s = bkey_val_u64s(k.k);
- int val_u64s_delta;
- u64 len = 0;
+ unsigned durability = 0;
+ struct bch_dev *ca;
- if (bkey_cmp(where, k.k->p) >= 0)
+ if (p.ptr.cached)
return 0;
- EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
+ ca = bch_dev_bkey_exists(c, p.ptr.dev);
- len = where.offset - bkey_start_offset(k.k);
+ if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
+ durability = max_t(unsigned, durability, ca->mi.durability);
- k.k->p = where;
- k.k->size = len;
+ if (p.has_ec) {
+ struct stripe *s =
+ genradix_ptr(&c->stripes[0], p.ec.idx);
- if (!len) {
- k.k->type = KEY_TYPE_deleted;
- new_val_u64s = 0;
- }
+ if (WARN_ON(!s))
+ goto out;
- switch (k.k->type) {
- case KEY_TYPE_inline_data:
- new_val_u64s = min(new_val_u64s, k.k->size << 6);
- break;
+ durability = max_t(unsigned, durability, s->nr_redundant);
}
-
- val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
- BUG_ON(val_u64s_delta < 0);
-
- set_bkey_val_u64s(k.k, new_val_u64s);
- memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
- return -val_u64s_delta;
-}
-
-const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
-{
- return bch2_bkey_ptrs_invalid(c, k);
+out:
+ return durability;
}
-void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
+unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
- char buf[160];
-
- /*
- * XXX: we should be doing most/all of these checks at startup time,
- * where we check bch2_bkey_invalid() in btree_node_read_done()
- *
- * But note that we can't check for stale pointers or incorrect gc marks
- * until after journal replay is done (it might be an extent that's
- * going to get overwritten during replay)
- */
-
- if (percpu_down_read_trylock(&c->mark_lock)) {
- bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
- "extent key bad (replicas not marked in superblock):\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
- percpu_up_read(&c->mark_lock);
- }
- /*
- * If journal replay hasn't finished, we might be seeing keys
- * that will be overwritten by the time journal replay is done:
- */
- if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
- return;
-
- extent_for_each_ptr_decode(e, p, entry) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
- unsigned stale = gen_after(mark.gen, p.ptr.gen);
- unsigned disk_sectors = ptr_disk_sectors(p);
- unsigned mark_sectors = p.ptr.cached
- ? mark.cached_sectors
- : mark.dirty_sectors;
-
- bch2_fs_bug_on(stale && !p.ptr.cached, c,
- "stale dirty pointer (ptr gen %u bucket %u",
- p.ptr.gen, mark.gen);
-
- bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
-
- bch2_fs_bug_on(!stale &&
- (mark.data_type != BCH_DATA_USER ||
- mark_sectors < disk_sectors), c,
- "extent pointer not marked: %s:\n"
- "type %u sectors %u < %u",
- (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
- mark.data_type,
- mark_sectors, disk_sectors);
- }
-}
-
-void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- bch2_bkey_ptrs_to_text(out, c, k);
-}
-
-static unsigned bch2_crc_field_size_max[] = {
- [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
- [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
- [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
-};
-
-static void bch2_extent_crc_pack(union bch_extent_crc *dst,
- struct bch_extent_crc_unpacked src,
- enum bch_extent_entry_type type)
-{
-#define set_common_fields(_dst, _src) \
- _dst.type = 1 << type; \
- _dst.csum_type = _src.csum_type, \
- _dst.compression_type = _src.compression_type, \
- _dst._compressed_size = _src.compressed_size - 1, \
- _dst._uncompressed_size = _src.uncompressed_size - 1, \
- _dst.offset = _src.offset
-
- switch (type) {
- case BCH_EXTENT_ENTRY_crc32:
- set_common_fields(dst->crc32, src);
- dst->crc32.csum = *((__le32 *) &src.csum.lo);
- break;
- case BCH_EXTENT_ENTRY_crc64:
- set_common_fields(dst->crc64, src);
- dst->crc64.nonce = src.nonce;
- dst->crc64.csum_lo = src.csum.lo;
- dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
- break;
- case BCH_EXTENT_ENTRY_crc128:
- set_common_fields(dst->crc128, src);
- dst->crc128.nonce = src.nonce;
- dst->crc128.csum = src.csum;
- break;
- default:
- BUG();
- }
-#undef set_common_fields
-}
-
-void bch2_extent_crc_append(struct bkey_i *k,
- struct bch_extent_crc_unpacked new)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
- union bch_extent_crc *crc = (void *) ptrs.end;
- enum bch_extent_entry_type type;
-
- if (bch_crc_bytes[new.csum_type] <= 4 &&
- new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
- new.nonce <= CRC32_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc32;
- else if (bch_crc_bytes[new.csum_type] <= 10 &&
- new.uncompressed_size - 1 <= CRC64_SIZE_MAX &&
- new.nonce <= CRC64_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc64;
- else if (bch_crc_bytes[new.csum_type] <= 16 &&
- new.uncompressed_size - 1 <= CRC128_SIZE_MAX &&
- new.nonce <= CRC128_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc128;
- else
- BUG();
-
- bch2_extent_crc_pack(crc, new, type);
-
- k->k.u64s += extent_entry_u64s(ptrs.end);
-
- EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
-}
-
-static inline void __extent_entry_insert(struct bkey_i *k,
- union bch_extent_entry *dst,
- union bch_extent_entry *new)
-{
- union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
-
- memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
- dst, (u64 *) end - (u64 *) dst);
- k->k.u64s += extent_entry_u64s(new);
- memcpy(dst, new, extent_entry_bytes(new));
-}
-
-void bch2_extent_ptr_decoded_append(struct bkey_i *k,
- struct extent_ptr_decoded *p)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
- struct bch_extent_crc_unpacked crc =
- bch2_extent_crc_unpack(&k->k, NULL);
- union bch_extent_entry *pos;
-
- if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
- pos = ptrs.start;
- goto found;
- }
-
- bkey_for_each_crc(&k->k, ptrs, crc, pos)
- if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
- pos = extent_entry_next(pos);
- goto found;
- }
+ unsigned durability = 0;
- bch2_extent_crc_append(k, p->crc);
- pos = bkey_val_end(bkey_i_to_s(k));
-found:
- p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
- __extent_entry_insert(k, pos, to_entry(&p->ptr));
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ durability += bch2_extent_ptr_durability(c, p);
- if (p->has_ec) {
- p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
- __extent_entry_insert(k, pos, to_entry(&p->ec));
- }
+ return durability;
}
/*
@@ -1028,7 +905,7 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
/* will only happen if all pointers were cached: */
- if (!bkey_val_u64s(k.k))
+ if (!bch2_bkey_nr_ptrs(k.s_c))
k.k->type = KEY_TYPE_discard;
return bkey_whiteout(k.k);
@@ -1065,116 +942,172 @@ void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
}
}
-enum merge_result bch2_extent_merge(struct bch_fs *c,
- struct bkey_s _l, struct bkey_s _r)
+void bch2_bkey_append_ptr(struct bkey_i *k,
+ struct bch_extent_ptr ptr)
{
- struct bkey_s_extent l = bkey_s_to_extent(_l);
- struct bkey_s_extent r = bkey_s_to_extent(_r);
- union bch_extent_entry *en_l = l.v->start;
- union bch_extent_entry *en_r = r.v->start;
- struct bch_extent_crc_unpacked crc_l, crc_r;
+ EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
- if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
- return BCH_MERGE_NOMERGE;
+ switch (k->k.type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_extent:
+ EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
- crc_l = bch2_extent_crc_unpack(l.k, NULL);
+ ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
- extent_for_each_entry(l, en_l) {
- en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
+ memcpy((void *) &k->v + bkey_val_bytes(&k->k),
+ &ptr,
+ sizeof(ptr));
+ k->u64s++;
+ break;
+ default:
+ BUG();
+ }
+}
- if (extent_entry_type(en_l) != extent_entry_type(en_r))
- return BCH_MERGE_NOMERGE;
+static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
+ union bch_extent_entry *entry)
+{
+ union bch_extent_entry *i = ptrs.start;
- switch (extent_entry_type(en_l)) {
- case BCH_EXTENT_ENTRY_ptr: {
- const struct bch_extent_ptr *lp = &en_l->ptr;
- const struct bch_extent_ptr *rp = &en_r->ptr;
- struct bch_dev *ca;
+ if (i == entry)
+ return NULL;
- if (lp->offset + crc_l.compressed_size != rp->offset ||
- lp->dev != rp->dev ||
- lp->gen != rp->gen)
- return BCH_MERGE_NOMERGE;
+ while (extent_entry_next(i) != entry)
+ i = extent_entry_next(i);
+ return i;
+}
- /* We don't allow extents to straddle buckets: */
- ca = bch_dev_bkey_exists(c, lp->dev);
+union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
+ struct bch_extent_ptr *ptr)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *dst, *src, *prev;
+ bool drop_crc = true;
- if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
- return BCH_MERGE_NOMERGE;
+ EBUG_ON(ptr < &ptrs.start->ptr ||
+ ptr >= &ptrs.end->ptr);
+ EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
+
+ src = extent_entry_next(to_entry(ptr));
+ if (src != ptrs.end &&
+ !extent_entry_is_crc(src))
+ drop_crc = false;
+ dst = to_entry(ptr);
+ while ((prev = extent_entry_prev(ptrs, dst))) {
+ if (extent_entry_is_ptr(prev))
break;
- }
- case BCH_EXTENT_ENTRY_stripe_ptr:
- if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
- en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
- return BCH_MERGE_NOMERGE;
+
+ if (extent_entry_is_crc(prev)) {
+ if (drop_crc)
+ dst = prev;
break;
- case BCH_EXTENT_ENTRY_crc32:
- case BCH_EXTENT_ENTRY_crc64:
- case BCH_EXTENT_ENTRY_crc128:
- crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
- crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+ }
- if (crc_l.csum_type != crc_r.csum_type ||
- crc_l.compression_type != crc_r.compression_type ||
- crc_l.nonce != crc_r.nonce)
- return BCH_MERGE_NOMERGE;
+ dst = prev;
+ }
- if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
- crc_r.offset)
- return BCH_MERGE_NOMERGE;
+ memmove_u64s_down(dst, src,
+ (u64 *) ptrs.end - (u64 *) src);
+ k.k->u64s -= (u64 *) src - (u64 *) dst;
- if (!bch2_checksum_mergeable(crc_l.csum_type))
- return BCH_MERGE_NOMERGE;
+ return dst;
+}
- if (crc_l.compression_type)
- return BCH_MERGE_NOMERGE;
+void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
+{
+ struct bch_extent_ptr *ptr;
- if (crc_l.csum_type &&
- crc_l.uncompressed_size +
- crc_r.uncompressed_size > c->sb.encoded_extent_max)
- return BCH_MERGE_NOMERGE;
+ bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
+}
- if (crc_l.uncompressed_size + crc_r.uncompressed_size - 1 >
- bch2_crc_field_size_max[extent_entry_type(en_l)])
- return BCH_MERGE_NOMERGE;
+const struct bch_extent_ptr *
+bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
- break;
- default:
- return BCH_MERGE_NOMERGE;
- }
- }
+ bkey_for_each_ptr(ptrs, ptr)
+ if (ptr->dev == dev)
+ return ptr;
- extent_for_each_entry(l, en_l) {
- struct bch_extent_crc_unpacked crc_l, crc_r;
+ return NULL;
+}
- en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
+bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
- if (!extent_entry_is_crc(en_l))
- continue;
+ bkey_for_each_ptr(ptrs, ptr)
+ if (bch2_dev_in_target(c, ptr->dev, target) &&
+ (!ptr->cached ||
+ !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+ return true;
- crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
- crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+ return false;
+}
- crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
- crc_l.csum,
- crc_r.csum,
- crc_r.uncompressed_size << 9);
+unsigned bch2_extent_is_compressed(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned ret = 0;
- crc_l.uncompressed_size += crc_r.uncompressed_size;
- crc_l.compressed_size += crc_r.compressed_size;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (!p.ptr.cached &&
+ p.crc.compression_type != BCH_COMPRESSION_NONE)
+ ret += p.crc.compressed_size;
- bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
- extent_entry_type(en_l));
- }
+ return ret;
+}
- bch2_key_resize(l.k, l.k->size + r.k->size);
+bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_extent_ptr m, u64 offset)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
- return BCH_MERGE_MERGE;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev == m.dev &&
+ p.ptr.gen == m.gen &&
+ (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
+ (s64) m.offset - offset)
+ return true;
+
+ return false;
+}
+
+/**
+ * bch2_bkey_nr_ptrs_allocated - number of pointers with space allocated:
+ * non-cached, uncompressed pointers, or a reservation's nr_replicas
+ */
+unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
+{
+ unsigned ret = 0;
+
+ switch (k.k->type) {
+ case KEY_TYPE_extent: {
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ extent_for_each_ptr_decode(e, p, entry)
+ ret += !p.ptr.cached &&
+ p.crc.compression_type == BCH_COMPRESSION_NONE;
+ break;
+ }
+ case KEY_TYPE_reservation:
+ ret = bkey_s_c_to_reservation(k).v->nr_replicas;
+ break;
+ }
+
+ return ret;
}
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
- unsigned nr_replicas)
+ unsigned nr_replicas)
{
struct btree_trans trans;
struct btree_iter *iter;
@@ -1202,71 +1135,119 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
return ret;
}
-unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
+/* Generic extent code: */
+
+int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
- unsigned ret = 0;
+ unsigned new_val_u64s = bkey_val_u64s(k.k);
+ int val_u64s_delta;
+ u64 sub;
+
+ if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
+ return 0;
+
+ EBUG_ON(bkey_cmp(where, k.k->p) > 0);
+
+ sub = where.offset - bkey_start_offset(k.k);
+
+ k.k->size -= sub;
+
+ if (!k.k->size) {
+ k.k->type = KEY_TYPE_deleted;
+ new_val_u64s = 0;
+ }
switch (k.k->type) {
- case KEY_TYPE_extent: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v: {
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ bool seen_crc = false;
+
+ bkey_extent_entry_for_each(ptrs, entry) {
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ if (!seen_crc)
+ entry->ptr.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ entry->crc32.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ entry->crc64.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ entry->crc128.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ break;
+ }
+
+ if (extent_entry_is_crc(entry))
+ seen_crc = true;
+ }
- extent_for_each_ptr_decode(e, p, entry)
- ret += !p.ptr.cached &&
- p.crc.compression_type == BCH_COMPRESSION_NONE;
break;
}
- case KEY_TYPE_reservation:
- ret = bkey_s_c_to_reservation(k).v->nr_replicas;
+ case KEY_TYPE_reflink_p: {
+ struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
+
+ le64_add_cpu(&p.v->idx, sub);
break;
}
+ case KEY_TYPE_inline_data: {
+ struct bkey_s_inline_data d = bkey_s_to_inline_data(k);
- return ret;
-}
-
-/* KEY_TYPE_reservation: */
+ sub = min_t(u64, sub << 9, bkey_val_bytes(d.k));
-const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+ memmove(d.v->data,
+ d.v->data + sub,
+ bkey_val_bytes(d.k) - sub);
- if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
- return "incorrect value size";
+ new_val_u64s -= sub >> 3;
+ break;
+ }
+ }
- if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
- return "invalid nr_replicas";
+ val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
+ BUG_ON(val_u64s_delta < 0);
- return NULL;
+ set_bkey_val_u64s(k.k, new_val_u64s);
+ memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
+ return -val_u64s_delta;
}
-void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
+int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+ unsigned new_val_u64s = bkey_val_u64s(k.k);
+ int val_u64s_delta;
+ u64 len = 0;
- pr_buf(out, "generation %u replicas %u",
- le32_to_cpu(r.v->generation),
- r.v->nr_replicas);
-}
+ if (bkey_cmp(where, k.k->p) >= 0)
+ return 0;
-enum merge_result bch2_reservation_merge(struct bch_fs *c,
- struct bkey_s _l, struct bkey_s _r)
-{
- struct bkey_s_reservation l = bkey_s_to_reservation(_l);
- struct bkey_s_reservation r = bkey_s_to_reservation(_r);
+ EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
- if (l.v->generation != r.v->generation ||
- l.v->nr_replicas != r.v->nr_replicas)
- return BCH_MERGE_NOMERGE;
+ len = where.offset - bkey_start_offset(k.k);
- if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
- bch2_key_resize(l.k, KEY_SIZE_MAX);
- bch2_cut_front_s(l.k->p, r.s);
- return BCH_MERGE_PARTIAL;
+ k.k->p = where;
+ k.k->size = len;
+
+ if (!len) {
+ k.k->type = KEY_TYPE_deleted;
+ new_val_u64s = 0;
}
- bch2_key_resize(l.k, l.k->size + r.k->size);
+ switch (k.k->type) {
+ case KEY_TYPE_inline_data:
+ new_val_u64s = min(new_val_u64s, k.k->size << 6);
+ break;
+ }
- return BCH_MERGE_MERGE;
+ val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
+ BUG_ON(val_u64s_delta < 0);
+
+ set_bkey_val_u64s(k.k, new_val_u64s);
+ memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
+ return -val_u64s_delta;
}
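
The generic extent code that now closes out extents.c trims a key from the
front or the back. A standalone sketch of the front-cut arithmetic, with a
plain integer pair standing in for struct bpos/bkey (hypothetical types;
the real bch2_cut_front_s() also adjusts pointer and crc offsets by `sub`):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* A key covers sectors [p - size, p). Cutting the front at `where`
	 * shrinks the key from the left, as bch2_cut_front_s() does:
	 */
	struct ext { uint64_t p, size; };

	static uint64_t cut_front(uint64_t where, struct ext *k)
	{
		uint64_t start = k->p - k->size;

		if (where <= start)
			return 0;		/* nothing to trim */
		assert(where <= k->p);

		k->size -= where - start;	/* sub = where - start */
		return where - start;
	}

	int main(void)
	{
		struct ext k = { .p = 100, .size = 40 };	/* covers [60, 100) */
		uint64_t sub = cut_front(70, &k);

		printf("sub=%llu size=%llu\n",
		       (unsigned long long) sub, (unsigned long long) k.size);
		/* sub=10 size=30: the key now covers [70, 100) */
		return 0;
	}
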
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 35a66d4f4ea2..ef2849e8b1f2 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -387,7 +387,7 @@ void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c);
-/* bch_btree_ptr: */
+/* KEY_TYPE_btree_ptr: */
const char *bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_debugcheck(struct bch_fs *, struct bkey_s_c);
@@ -402,7 +402,7 @@ void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);
.swab = bch2_ptr_swab, \
}
-/* bch_extent: */
+/* KEY_TYPE_extent: */
const char *bch2_extent_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_extent_debugcheck(struct bch_fs *, struct bkey_s_c);
@@ -420,7 +420,7 @@ enum merge_result bch2_extent_merge(struct bch_fs *,
.key_merge = bch2_extent_merge, \
}
-/* bch_reservation: */
+/* KEY_TYPE_reservation: */
const char *bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
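
Finally, the size clamping in bch2_reservation_merge() above is worth
spelling out: when two adjacent keys together would exceed KEY_SIZE_MAX,
the left key grows to the maximum and the remainder stays in the right key
(BCH_MERGE_PARTIAL). A standalone sketch of just that arithmetic, with an
illustrative limit (the real KEY_SIZE_MAX value and bkey handling differ):

	#include <stdint.h>
	#include <stdio.h>

	#define KEY_SIZE_MAX 1000u	/* illustrative limit, not the real value */

	/* Returns the merged size taken by the left key; whatever cannot fit
	 * stays in *r_size, mirroring the bch2_key_resize()/bch2_cut_front_s()
	 * pair in bch2_reservation_merge():
	 */
	static uint64_t merge_sizes(uint64_t l_size, uint64_t *r_size)
	{
		uint64_t merged = l_size + *r_size;

		if (merged > KEY_SIZE_MAX) {
			*r_size = merged - KEY_SIZE_MAX;	/* partial merge */
			return KEY_SIZE_MAX;
		}
		*r_size = 0;					/* full merge */
		return merged;
	}

	int main(void)
	{
		uint64_t r = 300;
		uint64_t l = merge_sizes(900, &r);

		printf("l=%llu r=%llu\n",
		       (unsigned long long) l, (unsigned long long) r);
		/* l=1000 r=200: a BCH_MERGE_PARTIAL result */
		return 0;
	}
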