author     Kent Overstreet <kent.overstreet@linux.dev>  2022-10-15 00:34:38 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-05-22 12:29:10 -0400
commit     baf27c64dfb0c783fc0050378573f04ff14098cf (patch)
tree       700067b9b5b9774faf5d7267a2e7d9df059aac6c
parent     21b0969ed7b0b411cbbebde7487ca616a6c97005 (diff)
six locks: Fix a lost wakeup
There was a lost wakeup between a read unlock in percpu mode and a write lock: the unlock path unlocks, then executes a barrier, then checks for waiters; correspondingly, the lock side should set the wait bit and execute a barrier, then attempt to take the lock.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--   kernel/locking/six.c   11
1 file changed, 8 insertions, 3 deletions
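
To make the ordering concrete, here is a minimal userspace sketch of the barrier pairing the commit message describes. It uses C11 atomics instead of the kernel's atomic64_*()/smp_mb__after_atomic() primitives, and the names (lock_held, write_waiter, try_write_lock, read_unlock_needs_wakeup) are invented for illustration; they are not the six lock fields. Each side publishes its own update, issues a full barrier, and only then reads the other side's state, so at least one side is guaranteed to observe the other and the wakeup cannot be lost.

/* Illustrative sketch only: a simplified model of the barrier pairing, not six.c code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool lock_held    = true;   /* stands in for the percpu read count */
static atomic_bool write_waiter = false;  /* stands in for the SIX_LOCK_write waiter bit */

/* Lock side: set the wait bit, full barrier, then check whether the lock is free. */
static bool try_write_lock(void)
{
	atomic_store_explicit(&write_waiter, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb__after_atomic() */
	return !atomic_load_explicit(&lock_held, memory_order_relaxed);
}

/* Unlock side: drop the lock, full barrier, then check for a waiter to wake. */
static bool read_unlock_needs_wakeup(void)
{
	atomic_store_explicit(&lock_held, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence above */
	return atomic_load_explicit(&write_waiter, memory_order_relaxed);
}

int main(void)
{
	/*
	 * With both fences present, "locker still sees the lock held" and
	 * "unlocker sees no waiter" cannot both happen: either the lock
	 * attempt succeeds or the unlocker issues the wakeup.
	 */
	bool got_lock = try_write_lock();
	bool wakeup   = read_unlock_needs_wakeup();
	printf("got_lock=%d wakeup=%d\n", got_lock, wakeup);
	return 0;
}

In the patch below, this is exactly what moves: the removed lines folded the waiter bit into v, which is only applied after the pcpu_read_count() check, while the added lines set the bit and execute smp_mb__after_atomic() before that check, so the read-unlock path can no longer miss the waiter.
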
diff --git a/kernel/locking/six.c b/kernel/locking/six.c
index 6e984a990b13..d197f3b1b4d5 100644
--- a/kernel/locking/six.c
+++ b/kernel/locking/six.c
@@ -197,6 +197,14 @@ retry:
 			atomic64_add(__SIX_VAL(write_locking, 1),
 				     &lock->state.counter);
 			smp_mb__after_atomic();
+		} else if (!(lock->state.waiters & (1 << SIX_LOCK_write))) {
+			atomic64_add(__SIX_VAL(waiters, 1 << SIX_LOCK_write),
+				     &lock->state.counter);
+			/*
+			 * pairs with barrier after unlock and before checking
+			 * for readers in unlock path
+			 */
+			smp_mb__after_atomic();
 		}
 
 		ret = !pcpu_read_count(lock);
@@ -211,9 +219,6 @@ retry:
 	if (ret || try)
 		v -= __SIX_VAL(write_locking, 1);
 
-	if (!ret && !try && !(lock->state.waiters & (1 << SIX_LOCK_write)))
-		v += __SIX_VAL(waiters, 1 << SIX_LOCK_write);
-
 	if (try && !ret) {
 		old.v = atomic64_add_return(v, &lock->state.counter);
 		six_lock_wakeup(lock, old, SIX_LOCK_read);