summaryrefslogtreecommitdiff
path: root/fs/bcachefs/six.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-05-20 20:37:53 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-08-14 12:28:33 -0400
commit74203e2d712aa744dd8354999eccc59155733be0 (patch)
tree561cb61c376f6388d9720b3d7d4e763571740e34 /fs/bcachefs/six.c
parent26525269ea5edcb7d86fe27ed2df3618a3980eb3 (diff)
six locks: Centralize setting of waiting bit
Originally, the waiting bit was always set by trylock() on failure; however, it's now set by __six_lock_type_slowpath(), with wait_lock held — which is the more correct place to do it. That made setting the waiting bit in trylock redundant, so this patch deletes that. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/six.c')
-rw-r--r--fs/bcachefs/six.c15
1 file changed, 3 insertions, 12 deletions
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 01acc2a977c6..0594539b1dec 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -151,14 +151,6 @@ static int __do_six_trylock_type(struct six_lock *lock,
atomic64_add(__SIX_VAL(write_locking, 1),
&lock->state.counter);
smp_mb__after_atomic();
- } else if (!(lock->state.waiters & (1 << SIX_LOCK_write))) {
- atomic64_add(__SIX_VAL(waiters, 1 << SIX_LOCK_write),
- &lock->state.counter);
- /*
- * pairs with barrier after unlock and before checking
- * for readers in unlock path
- */
- smp_mb__after_atomic();
}
ret = !pcpu_read_count(lock);
@@ -190,10 +182,9 @@ static int __do_six_trylock_type(struct six_lock *lock,
if (type == SIX_LOCK_write)
new.write_locking = 0;
- } else if (!try && !(new.waiters & (1 << type)))
- new.waiters |= 1 << type;
- else
- break; /* waiting bit already set */
+ } else {
+ break;
+ }
} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
old.v, new.v)) != old.v);