diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2018-08-03 19:41:44 -0400 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2018-08-12 18:17:53 -0400 |
commit | 0f141ac3dda3ee656339b59fdc3f9d559d509e21 (patch) | |
tree | 3696d806da26948cd78ed8c9a78be1bdcba8a979 | |
parent | 6eb9e65ca8f8f01263c93d49fc452f3d6b4c1f2f (diff) |
bcachefs: Factor out btree_key_can_insert()
Working on getting rid of all the reasons bch2_insert_fixup_extent() can
fail/stop partway, which is needed for other refactorings.
One of the reasons we could have to bail out is that, when splitting a
compressed extent, we might need to add to our disk reservation - but we
can check that before actually starting the insert.
-rw-r--r-- | fs/bcachefs/btree_types.h | 1 | ||||
-rw-r--r-- | fs/bcachefs/btree_update_leaf.c | 55 | ||||
-rw-r--r-- | fs/bcachefs/extents.c | 46 | ||||
-rw-r--r-- | fs/bcachefs/extents.h | 6 |
4 files changed, 73 insertions, 35 deletions
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 39e2db757f9a..9cbd5316d5e6 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -447,7 +447,6 @@ enum btree_insert_ret { /* extent spanned multiple leaf nodes: have to traverse to next node: */ BTREE_INSERT_NEED_TRAVERSE, /* write lock held for too long */ - BTREE_INSERT_NEED_RESCHED, /* leaf node needs to be split */ BTREE_INSERT_BTREE_NODE_FULL, BTREE_INSERT_JOURNAL_RES_FULL, diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c index f4d8dbfcb815..6abc79db5e89 100644 --- a/fs/bcachefs/btree_update_leaf.c +++ b/fs/bcachefs/btree_update_leaf.c @@ -296,6 +296,30 @@ static inline int btree_trans_cmp(struct btree_insert_entry l, /* Normal update interface: */ +static enum btree_insert_ret +btree_key_can_insert(struct btree_insert *trans, + struct btree_insert_entry *insert, + unsigned *u64s) +{ + struct bch_fs *c = trans->c; + struct btree *b = insert->iter->l[0].b; + static enum btree_insert_ret ret; + + if (unlikely(btree_node_fake(b))) + return BTREE_INSERT_BTREE_NODE_FULL; + + ret = !btree_node_is_extents(b) + ? 
BTREE_INSERT_OK + : bch2_extent_can_insert(trans, insert, u64s); + if (ret) + return ret; + + if (*u64s > bch_btree_keys_u64s_remaining(c, b)) + return BTREE_INSERT_BTREE_NODE_FULL; + + return BTREE_INSERT_OK; +} + /* * Get journal reservation, take write locks, and attempt to do btree update(s): */ @@ -335,24 +359,34 @@ static inline int do_btree_insert_at(struct btree_insert *trans, goto out; } + /* + * Check if the insert will fit in the leaf node with the write lock + * held, otherwise another thread could write the node changing the + * amount of space available: + */ u64s = 0; trans_for_each_entry(trans, i) { /* Multiple inserts might go to same leaf: */ if (!same_leaf_as_prev(trans, i)) u64s = 0; - /* - * bch2_btree_node_insert_fits() must be called under write lock: - * with only an intent lock, another thread can still call - * bch2_btree_node_write(), converting an unwritten bset to a - * written one - */ u64s += i->k->k.u64s + i->extra_res; - if (!bch2_btree_node_insert_fits(c, - i->iter->l[0].b, u64s)) { + switch (btree_key_can_insert(trans, i, &u64s)) { + case BTREE_INSERT_OK: + break; + case BTREE_INSERT_BTREE_NODE_FULL: ret = -EINTR; *split = i->iter; goto out; + case BTREE_INSERT_ENOSPC: + ret = -ENOSPC; + goto out; + case BTREE_INSERT_NEED_GC_LOCK: + ret = -EINTR; + *cycle_gc_lock = true; + goto out; + default: + BUG(); } } @@ -372,7 +406,6 @@ static inline int do_btree_insert_at(struct btree_insert *trans, break; case BTREE_INSERT_JOURNAL_RES_FULL: case BTREE_INSERT_NEED_TRAVERSE: - case BTREE_INSERT_NEED_RESCHED: ret = -EINTR; break; case BTREE_INSERT_BTREE_NODE_FULL: @@ -382,10 +415,6 @@ static inline int do_btree_insert_at(struct btree_insert *trans, case BTREE_INSERT_ENOSPC: ret = -ENOSPC; break; - case BTREE_INSERT_NEED_GC_LOCK: - ret = -EINTR; - *cycle_gc_lock = true; - break; default: BUG(); } diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index be4dd2307a43..8941c29717a7 100644 --- a/fs/bcachefs/extents.c +++ 
b/fs/bcachefs/extents.c @@ -1113,8 +1113,6 @@ static bool bch2_extent_merge_inline(struct bch_fs *, struct bkey_packed *, bool); -#define MAX_LOCK_HOLD_TIME (5 * NSEC_PER_MSEC) - static enum btree_insert_ret extent_insert_should_stop(struct extent_insert_state *s) { @@ -1287,23 +1285,41 @@ extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k) return __extent_insert_advance_pos(s, next_pos, k); } -static enum btree_insert_ret -extent_insert_check_split_compressed(struct extent_insert_state *s, - struct bkey_s_c k, - enum bch_extent_overlap overlap) +enum btree_insert_ret +bch2_extent_can_insert(struct btree_insert *trans, + struct btree_insert_entry *insert, + unsigned *u64s) { - struct bch_fs *c = s->trans->c; - unsigned sectors; + struct btree_iter_level *l = &insert->iter->l[0]; + struct btree_node_iter node_iter = l->iter; + enum bch_extent_overlap overlap; + struct bkey_packed *_k; + struct bkey unpacked; + struct bkey_s_c k; + int sectors; + + _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b, + KEY_TYPE_DISCARD); + if (!_k) + return BTREE_INSERT_OK; + + k = bkey_disassemble(l->b, _k, &unpacked); + + overlap = bch2_extent_overlap(&insert->k->k, k.k); + + /* account for having to split existing extent: */ + if (overlap == BCH_EXTENT_OVERLAP_MIDDLE) + *u64s += _k->u64s; if (overlap == BCH_EXTENT_OVERLAP_MIDDLE && (sectors = bch2_extent_is_compressed(k))) { int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD; - if (s->trans->flags & BTREE_INSERT_NOFAIL) + if (trans->flags & BTREE_INSERT_NOFAIL) flags |= BCH_DISK_RESERVATION_NOFAIL; - switch (bch2_disk_reservation_add(c, - s->trans->disk_res, + switch (bch2_disk_reservation_add(trans->c, + trans->disk_res, sectors * bch2_extent_nr_dirty_ptrs(k), flags)) { case 0: @@ -1471,10 +1487,6 @@ __bch2_delete_fixup_extent(struct extent_insert_state *s) overlap = bch2_extent_overlap(&insert->k, k.k); - ret = extent_insert_check_split_compressed(s, k.s_c, overlap); - if (ret) - break; - ret = 
extent_insert_advance_pos(s, k.s_c); if (ret) break; @@ -1550,10 +1562,6 @@ __bch2_insert_fixup_extent(struct extent_insert_state *s) overlap = bch2_extent_overlap(&insert->k, k.k); - ret = extent_insert_check_split_compressed(s, k.s_c, overlap); - if (ret) - break; - if (!k.k->size) goto squash; diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h index ca61e30f9deb..c0ddec3a9b03 100644 --- a/fs/bcachefs/extents.h +++ b/fs/bcachefs/extents.h @@ -62,8 +62,10 @@ int bch2_extent_pick_ptr(struct bch_fs *, struct bkey_s_c, struct extent_pick_ptr *); enum btree_insert_ret -bch2_insert_fixup_extent(struct btree_insert *, - struct btree_insert_entry *); +bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *, + unsigned *); +enum btree_insert_ret +bch2_insert_fixup_extent(struct btree_insert *, struct btree_insert_entry *); bool bch2_extent_normalize(struct bch_fs *, struct bkey_s); void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent, |