author	Kent Overstreet <kent.overstreet@linux.dev>	2022-10-09 04:55:02 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-06-20 22:52:15 -0400
commit	565eb5097afef466af6337aa7b51b683fd3f7967 (patch)
tree	bbff8c75f5e2b07ec95877300afd193ad6804693 /fs/bcachefs/btree_locking.c
parent	d09a5ae8288bbe8c4028e45743af95e0c6d8961a (diff)
bcachefs: Simplify break_cycle()
We'd like to prioritize aborting transactions that have done less work -
however, it appears breaking cycles by telling other threads to abort may
still be buggy, so disable that for now.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/btree_locking.c')
-rw-r--r--	fs/bcachefs/btree_locking.c	19
1 file changed, 10 insertions, 9 deletions
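The policy described in the commit message can be summarized with a small userspace sketch; the names below (struct waiter, pick_victim, work_done, may_abort_others) are made up for illustration and are not the bcachefs API. With may_abort_others disabled, as in this patch, only the transaction that detected the cycle gets restarted; the preferred branch would instead pick a cycle member that has done less work.

/*
 * Illustrative sketch only: a userspace model of the victim-selection
 * policy the commit message describes. All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct waiter {
	const char	*name;
	bool		lock_may_not_fail;	/* this transaction must not be aborted */
	bool		wants_write_lock;	/* write-lock waiters are never chosen */
	unsigned	work_done;		/* proxy for "how much work it has done" */
};

/*
 * Preferred policy (the part the patch wraps in #if 0): abort some other
 * member of the cycle, ideally one that has done the least work.
 * Fallback policy (what remains enabled): only restart the transaction
 * that detected the cycle, i.e. the first graph entry.
 */
static int pick_victim(struct waiter *g, unsigned nr, bool may_abort_others)
{
	int best = -1;
	unsigned i;

	if (may_abort_others)
		for (i = 0; i < nr; i++) {
			if (g[i].lock_may_not_fail || g[i].wants_write_lock)
				continue;
			if (best < 0 || g[i].work_done < g[best].work_done)
				best = i;
		}

	if (best >= 0)
		return best;

	return g[0].lock_may_not_fail ? -1 : 0;
}

int main(void)
{
	struct waiter cycle[] = {
		{ "checker",  false, false, 100 },
		{ "writer",   false, true,   10 },
		{ "newcomer", false, false,   5 },
	};

	/* false mirrors the #if 0: other transactions may not be told to abort */
	int victim = pick_victim(cycle, 3, false);

	printf("abort %s\n", victim >= 0 ? cycle[victim].name : "nobody");
	return 0;
}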
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 458f870c7682..aaf59026c30e 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -96,25 +96,26 @@ static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
 static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
 {
-	int ret;
-
 	if (i == g->g) {
 		trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
-		ret = btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
+		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
 	} else {
 		i->trans->lock_must_abort	= true;
-		ret = 0;
-	}
-
-	for (i = g->g + 1; i < g->g + g->nr; i++)
 		wake_up_process(i->trans->locking_wait.task);
-	return ret;
+		return 0;
+	}
 }
 
 static noinline int break_cycle(struct lock_graph *g)
 {
 	struct trans_waiting_for_lock *i;
 
+	/*
+	 * We'd like to prioritize aborting transactions that have done less
+	 * work - but it appears breaking cycles by telling other transactions
+	 * to abort may still be buggy:
+	 */
+#if 0
 	for (i = g->g; i < g->g + g->nr; i++) {
 		if (i->trans->lock_may_not_fail ||
 		    i->trans->locking_wait.lock_want == SIX_LOCK_write)
 			continue;
@@ -130,7 +131,7 @@ static noinline int break_cycle(struct lock_graph *g)
 		return abort_lock(g, i);
 	}
-
+#endif
 	for (i = g->g; i < g->g + g->nr; i++) {
 		if (i->trans->lock_may_not_fail)
 			continue;