author		Kent Overstreet <kent.overstreet@linux.dev>	2023-08-12 15:05:06 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-08-14 12:28:45 -0400
commit		2a864162c797534b702705276663d4fce8505f0c (patch)
tree		0760c8f94035fe37ab4e7de3a8c67bd594a3f854
parent		809536034d1f0863c8f06acfb9c7d343a90b61e3 (diff)
bcachefs: six locks: Fix missing barrier on wait->lock_acquired
Six locks do lock handoff via the wakeup path: the thread doing the
wakeup also takes the lock on behalf of the waiter, which means the
waiter only has to look at its waitlist entry, and doesn't have to touch
the lock cacheline while another thread is using it.

Linus noticed that this needs a real barrier, which this patch fixes.

Also add a comment for the should_sleep_fn() error path.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: linux-bcachefs@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
-rw-r--r--	fs/bcachefs/six.c | 33 +++++++++++++++++++++++++--------
1 file changed, 25 insertions(+), 8 deletions(-)
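The pattern the patch establishes (unlink the waiter from the list, then
publish lock_acquired with a release store that pairs with the waiter's
acquire load) can be sketched in standalone C11, with
memory_order_release/acquire standing in for the kernel's
smp_store_release()/smp_load_acquire(). The names below, struct waiter,
wake_one() and wait_for_handoff(), are illustrative scaffolding, not the
bcachefs code:

/*
 * Minimal userspace sketch of the six-lock handoff, using C11 atomics
 * in place of smp_store_release()/smp_load_acquire(). Hypothetical
 * names, not the bcachefs implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct waiter {
	struct waiter	*next;		/* waitlist link; lives on the waiter's stack */
	atomic_bool	lock_acquired;	/* written by the waker, read by the waiter */
};

/*
 * Waker side: unlink the entry *before* publishing lock_acquired. The
 * release store orders the list write ahead of the flag, so once the
 * waiter observes the flag it may safely reuse its stack entry.
 */
static void wake_one(struct waiter **head)
{
	struct waiter *w = *head;

	*head = w->next;				/* __list_del() analogue */
	atomic_store_explicit(&w->lock_acquired, true,	/* smp_store_release() */
			      memory_order_release);
}

/*
 * Waiter side: the acquire load pairs with the release store above,
 * guaranteeing the unlink is visible before @w is touched again. The
 * real code sleeps via set_current_state()/schedule() instead of
 * spinning.
 */
static void wait_for_handoff(struct waiter *w)
{
	while (!atomic_load_explicit(&w->lock_acquired,	/* smp_load_acquire() */
				     memory_order_acquire))
		;
}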
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 581aee565e95..b6ca53c85249 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -223,14 +223,16 @@ again:
 		if (ret <= 0)
 			goto unlock;
 
-		__list_del(w->list.prev, w->list.next);
 		task = w->task;
+		__list_del(w->list.prev, w->list.next);
 		/*
-		 * Do no writes to @w besides setting lock_acquired - otherwise
-		 * we would need a memory barrier:
+		 * The release barrier here ensures the ordering of the
+		 * __list_del before setting w->lock_acquired; @w is on the
+		 * stack of the thread doing the waiting and will be reused
+		 * after it sees w->lock_acquired with no other locking:
+		 * pairs with smp_load_acquire() in six_lock_slowpath()
 		 */
-		barrier();
-		w->lock_acquired = true;
+		smp_store_release(&w->lock_acquired, true);
 		wake_up_process(task);
 	}
 
@@ -502,17 +504,32 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
 	while (1) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (wait->lock_acquired)
+		/*
+		 * Ensures that writes to the waitlist entry happen after we see
+		 * wait->lock_acquired: pairs with the smp_store_release in
+		 * __six_lock_wakeup
+		 */
+		if (smp_load_acquire(&wait->lock_acquired))
 			break;
 
 		ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
 		if (unlikely(ret)) {
+			bool acquired;
+
+			/*
+			 * If should_sleep_fn() returns an error, we are
+			 * required to return that error even if we already
+			 * acquired the lock - should_sleep_fn() might have
+			 * modified external state (e.g. when the deadlock cycle
+			 * detector in bcachefs issued a transaction restart)
+			 */
 			raw_spin_lock(&lock->wait_lock);
-			if (!wait->lock_acquired)
+			acquired = wait->lock_acquired;
+			if (!acquired)
 				list_del(&wait->list);
 			raw_spin_unlock(&lock->wait_lock);
 
-			if (unlikely(wait->lock_acquired))
+			if (unlikely(acquired))
 				do_six_unlock_type(lock, type);
 			break;
 		}
 