author     Kent Overstreet <kent.overstreet@gmail.com>    2019-08-09 13:01:10 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>    2019-08-13 03:17:18 -0400
commit     93845c35b795af4ef402e49f0f3e9afbaf3fd3bc (patch)
tree       1ced6151a0362e5c3d97b2eafdd0c274dce846a4
parent     ce609c7c87c167ab69df89d75550d42734324e10 (diff)
bcachefs: Rework calling convention for marking overwrites
-rw-r--r--  fs/bcachefs/alloc_background.c         5
-rw-r--r--  fs/bcachefs/btree_gc.c                 5
-rw-r--r--  fs/bcachefs/btree_update_interior.c   21
-rw-r--r--  fs/bcachefs/buckets.c                128
-rw-r--r--  fs/bcachefs/buckets.h                 16
-rw-r--r--  fs/bcachefs/ec.c                       2
-rw-r--r--  fs/bcachefs/recovery.c                 2
7 files changed, 95 insertions, 84 deletions
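
The heart of the rework is ptr_disk_sectors_delta() in buckets.c below: rather than callers precomputing a signed sector count that encodes which overlap case applies, marking now takes the offset of the overwrite within the existing extent together with a sector delta, and the BCH_BUCKET_MARK_OVERWRITE / BCH_BUCKET_MARK_OVERWRITE_SPLIT flags select how the on-disk delta is derived. The following is an editorial sketch of the new split-extent arithmetic only, not code from the patch: the helper name split_delta is made up, and the __ptr_disk_sectors() checksum/compression scaling is dropped, i.e. it assumes an uncompressed extent whose live sectors equal its disk sectors.

/*
 * Sketch of the BCH_BUCKET_MARK_OVERWRITE_SPLIT accounting: an existing
 * extent of live_size sectors is overwritten in the middle, starting at
 * offset within it, by a new extent of -delta sectors (delta is negative,
 * as passed by bch2_mark_overwrite() for the OVERLAP_MIDDLE case).
 */
#include <assert.h>
#include <stdio.h>

static long long split_delta(unsigned live_size, unsigned offset, long long delta)
{
	assert(offset + -delta <= live_size);

	/* drop the whole old extent ... */
	return -(long long) live_size
		/* ... then re-add the surviving front piece ... */
		+ offset
		/* ... and the surviving back piece */
		+ ((long long) (live_size - offset) + delta);
}

int main(void)
{
	/* 128-sector extent overwritten by 32 sectors starting at offset 48 */
	printf("%lld\n", split_delta(128, 48, -32));	/* prints -32 */
	return 0;
}

With no compression the three terms collapse to delta itself: overwriting 32 sectors in the middle of a 128-sector extent frees exactly those 32 sectors, since both surviving pieces stay live. The real function rescales each piece through __ptr_disk_sectors(), which is why callers now pass the raw offset instead of a precomputed delta.
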
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 43dc2f270dc6..4cf728cea393 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -232,7 +232,7 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
- bch2_mark_key(c, k, 0, NULL, 0,
+ bch2_mark_key(c, k, 0, 0, NULL, 0,
BCH_BUCKET_MARK_ALLOC_READ|
BCH_BUCKET_MARK_NOATOMIC);
@@ -244,7 +244,8 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
for_each_journal_key(*journal_keys, j)
if (j->btree_id == BTREE_ID_ALLOC)
- bch2_mark_key(c, bkey_i_to_s_c(j->k), 0, NULL, 0,
+ bch2_mark_key(c, bkey_i_to_s_c(j->k),
+ 0, 0, NULL, 0,
BCH_BUCKET_MARK_ALLOC_READ|
BCH_BUCKET_MARK_NOATOMIC);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index a458cfe0e92d..e43d48b8a342 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -171,7 +171,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
*max_stale = max(*max_stale, ptr_stale(ca, ptr));
}
- bch2_mark_key(c, k, k.k->size, NULL, 0, flags);
+ bch2_mark_key(c, k, 0, k.k->size, NULL, 0, flags);
fsck_err:
return ret;
}
@@ -418,7 +418,8 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
for_each_pending_btree_node_free(c, as, d)
if (d->index_update_done)
- bch2_mark_key(c, bkey_i_to_s_c(&d->key), 0, NULL, 0,
+ bch2_mark_key(c, bkey_i_to_s_c(&d->key),
+ 0, 0, NULL, 0,
BCH_BUCKET_MARK_GC);
mutex_unlock(&c->btree_interior_update_lock);
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 9294137719df..6813eddd26f5 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -194,7 +194,7 @@ found:
: gc_pos_btree_root(as->btree_id)) >= 0 &&
gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
bch2_mark_key_locked(c, bkey_i_to_s_c(&d->key),
- 0, NULL, 0,
+ 0, 0, NULL, 0,
BCH_BUCKET_MARK_OVERWRITE|
BCH_BUCKET_MARK_GC);
}
@@ -266,11 +266,12 @@ static void bch2_btree_node_free_ondisk(struct bch_fs *c,
{
BUG_ON(!pending->index_update_done);
- bch2_mark_key(c, bkey_i_to_s_c(&pending->key), 0, NULL, 0,
- BCH_BUCKET_MARK_OVERWRITE);
+ bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
+ 0, 0, NULL, 0, BCH_BUCKET_MARK_OVERWRITE);
if (gc_visited(c, gc_phase(GC_PHASE_PENDING_DELETE)))
- bch2_mark_key(c, bkey_i_to_s_c(&pending->key), 0, NULL, 0,
+ bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
+ 0, 0, NULL, 0,
BCH_BUCKET_MARK_OVERWRITE|
BCH_BUCKET_MARK_GC);
}
@@ -1077,11 +1078,11 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
fs_usage = bch2_fs_usage_scratch_get(c);
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
- 0, fs_usage, 0,
+ 0, 0, fs_usage, 0,
BCH_BUCKET_MARK_INSERT);
if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
- 0, NULL, 0,
+ 0, 0, NULL, 0,
BCH_BUCKET_MARK_INSERT|
BCH_BUCKET_MARK_GC);
@@ -1175,12 +1176,12 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
fs_usage = bch2_fs_usage_scratch_get(c);
bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
- 0, fs_usage, 0,
+ 0, 0, fs_usage, 0,
BCH_BUCKET_MARK_INSERT);
if (gc_visited(c, gc_pos_btree_node(b)))
bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
- 0, NULL, 0,
+ 0, 0, NULL, 0,
BCH_BUCKET_MARK_INSERT|
BCH_BUCKET_MARK_GC);
@@ -2003,11 +2004,11 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
fs_usage = bch2_fs_usage_scratch_get(c);
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
- 0, fs_usage, 0,
+ 0, 0, fs_usage, 0,
BCH_BUCKET_MARK_INSERT);
if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
- 0, NULL, 0,
+ 0, 0, NULL, 0,
BCH_BUCKET_MARK_INSERT|
BCH_BUCKET_MARK_GC);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 605be7badabb..eaa7005b35a8 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -810,23 +810,24 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
}
static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
- s64 delta)
+ unsigned offset, s64 delta,
+ unsigned flags)
{
- if (delta > 0) {
- /*
- * marking a new extent, which _will have size_ @delta
- *
- * in the bch2_mark_update -> BCH_EXTENT_OVERLAP_MIDDLE
- * case, we haven't actually created the key we'll be inserting
- * yet (for the split) - so we don't want to be using
- * k->size/crc.live_size here:
- */
- return __ptr_disk_sectors(p, delta);
+ if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
+ BUG_ON(offset + -delta > p.crc.live_size);
+
+ return -((s64) ptr_disk_sectors(p)) +
+ __ptr_disk_sectors(p, offset) +
+ __ptr_disk_sectors(p, p.crc.live_size -
+ offset + delta);
+ } else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
+ BUG_ON(offset + -delta > p.crc.live_size);
+
+ return -((s64) ptr_disk_sectors(p)) +
+ __ptr_disk_sectors(p, p.crc.live_size +
+ delta);
} else {
- BUG_ON(-delta > p.crc.live_size);
-
- return (s64) __ptr_disk_sectors(p, p.crc.live_size + delta) -
- (s64) ptr_disk_sectors(p);
+ return ptr_disk_sectors(p);
}
}
@@ -1005,7 +1006,8 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c,
}
static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, enum bch_data_type data_type,
+ unsigned offset, s64 sectors,
+ enum bch_data_type data_type,
struct bch_fs_usage *fs_usage,
unsigned journal_seq, unsigned flags)
{
@@ -1026,7 +1028,7 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
s64 disk_sectors = data_type == BCH_DATA_BTREE
? sectors
- : ptr_disk_sectors_delta(p, sectors);
+ : ptr_disk_sectors_delta(p, offset, sectors, flags);
bool stale = bch2_mark_pointer(c, p, disk_sectors, data_type,
fs_usage, journal_seq, flags);
@@ -1115,7 +1117,8 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
}
int bch2_mark_key_locked(struct bch_fs *c,
- struct bkey_s_c k, s64 sectors,
+ struct bkey_s_c k,
+ unsigned offset, s64 sectors,
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
@@ -1136,11 +1139,11 @@ int bch2_mark_key_locked(struct bch_fs *c,
? c->opts.btree_node_size
: -c->opts.btree_node_size;
- ret = bch2_mark_extent(c, k, sectors, BCH_DATA_BTREE,
+ ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_BTREE,
fs_usage, journal_seq, flags);
break;
case KEY_TYPE_extent:
- ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
+ ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_USER,
fs_usage, journal_seq, flags);
break;
case KEY_TYPE_stripe:
@@ -1171,14 +1174,14 @@ int bch2_mark_key_locked(struct bch_fs *c,
}
int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors,
+ unsigned offset, s64 sectors,
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
int ret;
percpu_down_read(&c->mark_lock);
- ret = bch2_mark_key_locked(c, k, sectors,
+ ret = bch2_mark_key_locked(c, k, offset, sectors,
fs_usage, journal_seq, flags);
percpu_up_read(&c->mark_lock);
@@ -1194,8 +1197,11 @@ inline int bch2_mark_overwrite(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree *b = iter->l[0].b;
+ unsigned offset = 0;
s64 sectors = 0;
+ flags |= BCH_BUCKET_MARK_OVERWRITE;
+
if (btree_node_is_extents(b)
? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
: bkey_cmp(new->k.p, old.k->p))
@@ -1204,35 +1210,33 @@ inline int bch2_mark_overwrite(struct btree_trans *trans,
if (btree_node_is_extents(b)) {
switch (bch2_extent_overlap(&new->k, old.k)) {
case BCH_EXTENT_OVERLAP_ALL:
+ offset = 0;
sectors = -((s64) old.k->size);
break;
case BCH_EXTENT_OVERLAP_BACK:
+ offset = bkey_start_offset(&new->k) -
+ bkey_start_offset(old.k);
sectors = bkey_start_offset(&new->k) -
old.k->p.offset;
break;
case BCH_EXTENT_OVERLAP_FRONT:
+ offset = 0;
sectors = bkey_start_offset(old.k) -
new->k.p.offset;
break;
case BCH_EXTENT_OVERLAP_MIDDLE:
- sectors = old.k->p.offset - new->k.p.offset;
- BUG_ON(sectors <= 0);
-
- bch2_mark_key_locked(c, old, sectors,
- fs_usage, trans->journal_res.seq,
- BCH_BUCKET_MARK_INSERT|flags);
-
- sectors = bkey_start_offset(&new->k) -
- old.k->p.offset;
+ offset = bkey_start_offset(&new->k) -
+ bkey_start_offset(old.k);
+ sectors = -((s64) new->k.size);
+ flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
break;
}
BUG_ON(sectors >= 0);
}
- return bch2_mark_key_locked(c, old, sectors, fs_usage,
- trans->journal_res.seq,
- BCH_BUCKET_MARK_OVERWRITE|flags) ?: 1;
+ return bch2_mark_key_locked(c, old, offset, sectors, fs_usage,
+ trans->journal_res.seq, flags) ?: 1;
}
int bch2_mark_update(struct btree_trans *trans,
@@ -1250,10 +1254,12 @@ int bch2_mark_update(struct btree_trans *trans,
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
+ EBUG_ON(btree_node_is_extents(b) &&
+ !bch2_extent_is_atomic(insert->k, insert->iter));
+
if (!(trans->flags & BTREE_INSERT_NOMARK_INSERT))
bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k),
- bpos_min(insert->k->k.p, b->key.k.p).offset -
- bkey_start_offset(&insert->k->k),
+ 0, insert->k->k.size,
fs_usage, trans->journal_res.seq,
BCH_BUCKET_MARK_INSERT|flags);
@@ -1518,8 +1524,9 @@ out:
}
static int bch2_trans_mark_extent(struct btree_trans *trans,
- struct bkey_s_c k,
- s64 sectors, enum bch_data_type data_type)
+ struct bkey_s_c k, unsigned offset,
+ s64 sectors, unsigned flags,
+ enum bch_data_type data_type)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
@@ -1539,7 +1546,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
s64 disk_sectors = data_type == BCH_DATA_BTREE
? sectors
- : ptr_disk_sectors_delta(p, sectors);
+ : ptr_disk_sectors_delta(p, offset, sectors, flags);
ret = bch2_trans_mark_pointer(trans, p, disk_sectors,
data_type);
@@ -1574,7 +1581,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
}
int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
- s64 sectors, unsigned flags)
+ unsigned offset, s64 sectors, unsigned flags)
{
struct replicas_delta_list *d;
struct bch_fs *c = trans->c;
@@ -1585,11 +1592,11 @@ int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
? c->opts.btree_node_size
: -c->opts.btree_node_size;
- return bch2_trans_mark_extent(trans, k, sectors,
- BCH_DATA_BTREE);
+ return bch2_trans_mark_extent(trans, k, offset, sectors,
+ flags, BCH_DATA_BTREE);
case KEY_TYPE_extent:
- return bch2_trans_mark_extent(trans, k, sectors,
- BCH_DATA_USER);
+ return bch2_trans_mark_extent(trans, k, offset, sectors,
+ flags, BCH_DATA_USER);
case KEY_TYPE_inode:
d = replicas_deltas_realloc(trans, 0);
@@ -1628,11 +1635,11 @@ int bch2_trans_mark_update(struct btree_trans *trans,
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
- ret = bch2_trans_mark_key(trans,
- bkey_i_to_s_c(insert),
- bpos_min(insert->k.p, b->key.k.p).offset -
- bkey_start_offset(&insert->k),
- BCH_BUCKET_MARK_INSERT);
+ EBUG_ON(btree_node_is_extents(b) &&
+ !bch2_extent_is_atomic(insert, iter));
+
+ ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
+ 0, insert->k.size, BCH_BUCKET_MARK_INSERT);
if (ret)
return ret;
@@ -1640,7 +1647,9 @@ int bch2_trans_mark_update(struct btree_trans *trans,
KEY_TYPE_discard))) {
struct bkey unpacked;
struct bkey_s_c k;
+ unsigned offset = 0;
s64 sectors = 0;
+ unsigned flags = BCH_BUCKET_MARK_OVERWRITE;
k = bkey_disassemble(b, _k, &unpacked);
@@ -1652,35 +1661,32 @@ int bch2_trans_mark_update(struct btree_trans *trans,
if (btree_node_is_extents(b)) {
switch (bch2_extent_overlap(&insert->k, k.k)) {
case BCH_EXTENT_OVERLAP_ALL:
+ offset = 0;
sectors = -((s64) k.k->size);
break;
case BCH_EXTENT_OVERLAP_BACK:
+ offset = bkey_start_offset(&insert->k) -
+ bkey_start_offset(k.k);
sectors = bkey_start_offset(&insert->k) -
k.k->p.offset;
break;
case BCH_EXTENT_OVERLAP_FRONT:
+ offset = 0;
sectors = bkey_start_offset(k.k) -
insert->k.p.offset;
break;
case BCH_EXTENT_OVERLAP_MIDDLE:
- sectors = k.k->p.offset - insert->k.p.offset;
- BUG_ON(sectors <= 0);
-
- ret = bch2_trans_mark_key(trans, k, sectors,
- BCH_BUCKET_MARK_INSERT);
- if (ret)
- return ret;
-
- sectors = bkey_start_offset(&insert->k) -
- k.k->p.offset;
+ offset = bkey_start_offset(&insert->k) -
+ bkey_start_offset(k.k);
+ sectors = -((s64) insert->k.size);
+ flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
break;
}
BUG_ON(sectors >= 0);
}
- ret = bch2_trans_mark_key(trans, k, sectors,
- BCH_BUCKET_MARK_OVERWRITE);
+ ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
if (ret)
return ret;
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 5ab6f3d34137..799bfb3c96d8 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -251,14 +251,15 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
#define BCH_BUCKET_MARK_INSERT (1 << 0)
#define BCH_BUCKET_MARK_OVERWRITE (1 << 1)
-#define BCH_BUCKET_MARK_BUCKET_INVALIDATE (1 << 2)
-#define BCH_BUCKET_MARK_GC (1 << 3)
-#define BCH_BUCKET_MARK_ALLOC_READ (1 << 4)
-#define BCH_BUCKET_MARK_NOATOMIC (1 << 5)
+#define BCH_BUCKET_MARK_OVERWRITE_SPLIT (1 << 2)
+#define BCH_BUCKET_MARK_BUCKET_INVALIDATE (1 << 3)
+#define BCH_BUCKET_MARK_GC (1 << 4)
+#define BCH_BUCKET_MARK_ALLOC_READ (1 << 5)
+#define BCH_BUCKET_MARK_NOATOMIC (1 << 6)
-int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, s64,
+int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, unsigned, s64,
struct bch_fs_usage *, u64, unsigned);
-int bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64,
+int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned, s64,
struct bch_fs_usage *, u64, unsigned);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *, unsigned);
@@ -272,7 +273,8 @@ int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
void bch2_replicas_delta_list_apply(struct bch_fs *,
struct bch_fs_usage *,
struct replicas_delta_list *);
-int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, s64, unsigned);
+int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c,
+ unsigned, s64, unsigned);
int bch2_trans_mark_update(struct btree_trans *,
struct btree_iter *iter,
struct bkey_i *insert);
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 438346b12901..bdb18c2a0b5c 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -1312,7 +1312,7 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
break;
}
- bch2_mark_key(c, k, 0, NULL, 0,
+ bch2_mark_key(c, k, 0, 0, NULL, 0,
BCH_BUCKET_MARK_ALLOC_READ|
BCH_BUCKET_MARK_NOATOMIC);
}
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 92867b5c078f..a7fc3fe4284a 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -295,7 +295,7 @@ retry:
if (split_compressed) {
ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k),
- -((s64) k->k.size),
+ 0, -((s64) k->k.size),
BCH_BUCKET_MARK_OVERWRITE) ?:
bch2_trans_commit(&trans, &disk_res, NULL,
BTREE_INSERT_ATOMIC|