author | Kent Overstreet <kent.overstreet@linux.dev> | 2023-11-12 20:53:57 -0500
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-11-12 20:57:28 -0500
commit | 7fd6c3ffe45b3b42c0bc8a8c5d1387a5e3316a54 (patch)
tree | d596299da8c34dff74cb13caf9dd47d9154c25b4 /libbcachefs/btree_write_buffer.c
parent | a613340b26ad88801666362d2824118396f34c38 (diff)
Update bcachefs sources to 3ca08ab51ec9 bcachefs: six locks: Simplify optimistic spinning
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'libbcachefs/btree_write_buffer.c')
-rw-r--r-- | libbcachefs/btree_write_buffer.c | 63
1 file changed, 31 insertions(+), 32 deletions(-)
diff --git a/libbcachefs/btree_write_buffer.c b/libbcachefs/btree_write_buffer.c
index 76b6f2dc..a6bf6ed3 100644
--- a/libbcachefs/btree_write_buffer.c
+++ b/libbcachefs/btree_write_buffer.c
@@ -9,9 +9,11 @@
 #include "journal.h"
 #include "journal_reclaim.h"
 
-#include <linux/atomic.h>
 #include <linux/sort.h>
 
+static int bch2_btree_write_buffer_journal_flush(struct journal *,
+			struct journal_entry_pin *, u64);
+
 static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
 {
 	const struct btree_write_buffered_key *l = _l;
@@ -46,6 +48,13 @@ static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
 	if (ret)
 		return ret;
 
+	/*
+	 * We can't clone a path that has write locks: unshare it now, before
+	 * set_pos and traverse():
+	 */
+	if (iter->path->ref > 1)
+		iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);
+
 	path = iter->path;
 
 	if (!*write_locked) {
@@ -65,24 +74,18 @@ static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
 
 	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
 	(*fast)++;
-
-	if (path->ref > 1) {
-		/*
-		 * We can't clone a path that has write locks: if the path is
-		 * shared, unlock before set_pos(), traverse():
-		 */
-		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
-		*write_locked = false;
-	}
 	return 0;
 trans_commit:
-	return bch2_trans_update_seq(trans, wb->journal_seq, iter, &wb->k,
-				     BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+	trans->journal_res.seq = wb->journal_seq;
+
+	return bch2_trans_update(trans, iter, &wb->k,
+				 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
 		bch2_trans_commit(trans, NULL, NULL,
 				  commit_flags|
-				  BTREE_INSERT_NOCHECK_RW|
-				  BTREE_INSERT_NOFAIL|
-				  BTREE_INSERT_JOURNAL_RECLAIM);
+				  BCH_TRANS_COMMIT_no_check_rw|
+				  BCH_TRANS_COMMIT_no_enospc|
+				  BCH_TRANS_COMMIT_no_journal_res|
+				  BCH_TRANS_COMMIT_journal_reclaim);
 }
 
 static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
@@ -125,9 +128,11 @@ btree_write_buffered_insert(struct btree_trans *trans,
 	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
 			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);
 
+	trans->journal_res.seq = wb->journal_seq;
+
 	ret = bch2_btree_iter_traverse(&iter) ?:
-		bch2_trans_update_seq(trans, wb->journal_seq, &iter, &wb->k,
-				      BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+		bch2_trans_update(trans, &iter, &wb->k,
+				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
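The trans_commit path in bch2_btree_write_buffer_flush_one() and btree_write_buffered_insert() above now share one commit pattern: stash the buffered key's original journal sequence in trans->journal_res.seq, queue the update with bch2_trans_update(), and commit with BCH_TRANS_COMMIT_no_journal_res so the commit path reuses that sequence instead of taking a fresh journal reservation. A condensed sketch of that pattern, using only names visible in the hunks above; the wrapper function itself is hypothetical and the usual bcachefs headers included by this file are assumed:

/* Sketch only: post-patch commit pattern for one buffered key. */
static int write_buffered_key_commit(struct btree_trans *trans,
				     struct btree_iter *iter,
				     struct btree_write_buffered_key *wb,
				     unsigned commit_flags)
{
	/* Reuse the key's original journal seq; don't take a new reservation: */
	trans->journal_res.seq = wb->journal_seq;

	return  bch2_trans_update(trans, iter, &wb->k,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  commit_flags|
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_no_journal_res|
				  BCH_TRANS_COMMIT_journal_reclaim);
}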
@@ -151,7 +156,8 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f
 	if (!locked && !mutex_trylock(&wb->flush_lock))
 		return 0;
 
-	bch2_journal_pin_copy(j, &pin, &wb->journal_pin, NULL);
+	bch2_journal_pin_copy(j, &pin, &wb->journal_pin,
+			      bch2_btree_write_buffer_journal_flush);
 	bch2_journal_pin_drop(j, &wb->journal_pin);
 
 	s = btree_write_buffer_switch(wb);
@@ -169,7 +175,7 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f
 	 * However, since we're not flushing in the order they appear in the
 	 * journal we won't be able to drop our journal pin until everything is
 	 * flushed - which means this could deadlock the journal if we weren't
-	 * passing BTREE_INSERT_JOURNAL_RECLAIM. This causes the update to fail
+	 * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
 	 * if it would block taking a journal reservation.
 	 *
 	 * If that happens, simply skip the key so we can optimistically insert
@@ -253,21 +259,14 @@ slowpath:
 		if (!i->journal_seq)
 			continue;
 
-		if (i->journal_seq > pin.seq) {
-			struct journal_entry_pin pin2;
-
-			memset(&pin2, 0, sizeof(pin2));
-
-			bch2_journal_pin_add(j, i->journal_seq, &pin2, NULL);
-			bch2_journal_pin_drop(j, &pin);
-			bch2_journal_pin_copy(j, &pin, &pin2, NULL);
-			bch2_journal_pin_drop(j, &pin2);
-		}
+		bch2_journal_pin_update(j, i->journal_seq, &pin,
+					bch2_btree_write_buffer_journal_flush);
 
 		ret = commit_do(trans, NULL, NULL,
 				commit_flags|
-				BTREE_INSERT_NOFAIL|
-				BTREE_INSERT_JOURNAL_RECLAIM,
+				BCH_TRANS_COMMIT_no_enospc|
+				BCH_TRANS_COMMIT_no_journal_res|
+				BCH_TRANS_COMMIT_journal_reclaim,
 				btree_write_buffered_insert(trans, i));
 		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
 			break;
@@ -297,7 +296,7 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j,
 	mutex_lock(&wb->flush_lock);
 
 	return bch2_trans_run(c,
-			__bch2_btree_write_buffer_flush(trans, BTREE_INSERT_NOCHECK_RW, true));
+			__bch2_btree_write_buffer_flush(trans, BCH_TRANS_COMMIT_no_check_rw, true));
 }
 
 static inline u64 btree_write_buffer_ref(int idx)
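For reference, the open-coded pin shuffle removed in the slowpath hunk is the manual equivalent of the bch2_journal_pin_update() call that replaces it: take a pin on the newer sequence first, then drop the old pin, so the journal is never left unpinned in between. A standalone sketch of that older pattern, using the bch2_journal_pin_* calls visible in the removed lines (the helper name here is hypothetical):

/* Sketch only: advance an existing journal pin to a newer sequence number. */
static void journal_pin_move_forward(struct journal *j, u64 seq,
				     struct journal_entry_pin *pin)
{
	struct journal_entry_pin pin2;

	memset(&pin2, 0, sizeof(pin2));

	bch2_journal_pin_add(j, seq, &pin2, NULL);	/* pin the newer seq first */
	bch2_journal_pin_drop(j, pin);			/* now safe to drop the caller's old pin */
	bch2_journal_pin_copy(j, pin, &pin2, NULL);	/* hand the new pin back to the caller */
	bch2_journal_pin_drop(j, &pin2);		/* release the temporary */
}

The patch also passes bch2_btree_write_buffer_journal_flush as the flush callback wherever a pin is taken, rather than NULL, so journal reclaim can trigger a write buffer flush through the pin itself.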