author     Kent Overstreet <kent.overstreet@linux.dev>   2023-02-05 14:09:30 -0500
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-02-05 16:42:02 -0500
commit     e8d3eed1efa28366ab305aa56b7e5dcd2cb0e8db (patch)
tree       0397357850c743f148e3c8c0a1a526b115117470
parent     40131c28c581d27907f8afe555bea5fb034ef462 (diff)
six locks: Improved optimistic spinning
This adds a threshold for the maximum spin time, similar to the rwsem code,
and a flag to the lock itself indicating when we've spun too long so other
threads also refrain from spinning.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
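
For illustration only (not part of the commit): below is a minimal userspace sketch of the idea the message describes, bounded optimistic spinning plus a shared "stop spinning" flag. The names toy_lock, toy_spin_trylock, toy_unlock and now_ns are made up for this sketch; the real patch additionally spins only while the lock owner is running on a CPU and serializes spinners with osq_lock(), both of which are omitted here.

/*
 * Illustrative userspace sketch only -- not the six lock code.  It shows the
 * two pieces the patch adds: a spin-time budget checked every 16 iterations,
 * and a shared "nospin" flag that tells other would-be spinners to give up.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct toy_lock {
	atomic_int	held;		/* 0 = free, 1 = held */
	atomic_bool	nospin;		/* set once spinning stopped paying off */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Spin trying to take the lock, but only for @budget_ns nanoseconds. */
static bool toy_spin_trylock(struct toy_lock *lock, uint64_t budget_ns)
{
	uint64_t end = now_ns() + budget_ns;
	unsigned loop = 0;

	/* Someone else already decided spinning is futile here: */
	if (atomic_load(&lock->nospin))
		return false;

	while (1) {
		int expected = 0;

		if (atomic_compare_exchange_weak(&lock->held, &expected, 1))
			return true;

		/* Sample the clock only every 16th iteration, as the patch does. */
		if (!(++loop & 0xf) && now_ns() > end) {
			atomic_store(&lock->nospin, true);
			return false;
		}
	}
}

/* Dropping the lock clears nospin so later lockers may spin again. */
static void toy_unlock(struct toy_lock *lock)
{
	atomic_store(&lock->nospin, false);
	atomic_store(&lock->held, 0);
}

A caller would pass something on the order of 10 * 1000 for budget_ns, matching the 10 * NSEC_PER_USEC deadline the patch computes before entering its spin loop.
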
-rw-r--r--   include/linux/six.h     3
-rw-r--r--   kernel/locking/six.c   52
2 files changed, 39 insertions, 16 deletions
diff --git a/include/linux/six.h b/include/linux/six.h
index 848aca7853fe..16ad2073f71c 100644
--- a/include/linux/six.h
+++ b/include/linux/six.h
@@ -80,9 +80,10 @@ union six_lock_state {
};
struct {
- unsigned read_lock:27;
+ unsigned read_lock:26;
unsigned write_locking:1;
unsigned intent_lock:1;
+ unsigned nospin:1;
unsigned waiters:3;
/*
* seq works much like in seqlocks: it's incremented every time
diff --git a/kernel/locking/six.c b/kernel/locking/six.c
index 8e9687a17454..0614b9158570 100644
--- a/kernel/locking/six.c
+++ b/kernel/locking/six.c
@@ -345,30 +345,39 @@ static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
#ifdef CONFIG_LOCK_SPIN_ON_OWNER
-static inline int six_can_spin_on_owner(struct six_lock *lock)
+static inline bool six_can_spin_on_owner(struct six_lock *lock)
{
struct task_struct *owner;
- int retval = 1;
+ bool ret;
if (need_resched())
- return 0;
+ return false;
rcu_read_lock();
owner = READ_ONCE(lock->owner);
- if (owner)
- retval = owner->on_cpu;
+ ret = !owner || owner_on_cpu(owner);
rcu_read_unlock();
- /*
- * if lock->owner is not set, the mutex owner may have just acquired
- * it and not set the owner yet or the mutex has been released.
- */
- return retval;
+
+ return ret;
+}
+
+static inline void six_set_nospin(struct six_lock *lock)
+{
+ union six_lock_state old, new;
+ u64 v = READ_ONCE(lock->state.v);
+
+ do {
+ new.v = old.v = v;
+ new.nospin = true;
+ } while ((v = atomic64_cmpxchg(&lock->state.counter, old.v, new.v)) != old.v);
}
static inline bool six_spin_on_owner(struct six_lock *lock,
- struct task_struct *owner)
+ struct task_struct *owner,
+ u64 end_time)
{
bool ret = true;
+ unsigned loop = 0;
rcu_read_lock();
while (lock->owner == owner) {
@@ -380,7 +389,13 @@ static inline bool six_spin_on_owner(struct six_lock *lock,
*/
barrier();
- if (!owner->on_cpu || need_resched()) {
+ if (!owner_on_cpu(owner) || need_resched()) {
+ ret = false;
+ break;
+ }
+
+ if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
+ six_set_nospin(lock);
ret = false;
break;
}
@@ -395,6 +410,7 @@ static inline bool six_spin_on_owner(struct six_lock *lock,
static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
{
struct task_struct *task = current;
+ u64 end_time;
if (type == SIX_LOCK_write)
return false;
@@ -406,6 +422,8 @@ static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type
if (!osq_lock(&lock->osq))
goto fail;
+ end_time = sched_clock() + 10 * NSEC_PER_USEC;
+
while (1) {
struct task_struct *owner;
@@ -414,7 +432,7 @@ static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type
* release the lock or go to sleep.
*/
owner = READ_ONCE(lock->owner);
- if (owner && !six_spin_on_owner(lock, owner))
+ if (owner && !six_spin_on_owner(lock, owner, end_time))
break;
if (do_six_trylock_type(lock, type, false)) {
@@ -605,9 +623,13 @@ static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
smp_mb(); /* between unlocking and checking for waiters */
state.v = READ_ONCE(lock->state.v);
} else {
+ u64 v = l[type].unlock_val;
+
+ if (type != SIX_LOCK_read)
+ v -= lock->state.v & __SIX_VAL(nospin, 1);
+
EBUG_ON(!(lock->state.v & l[type].held_mask));
- state.v = atomic64_add_return_release(l[type].unlock_val,
- &lock->state.counter);
+ state.v = atomic64_add_return_release(v, &lock->state.counter);
}
six_lock_wakeup(lock, state, l[type].unlock_wakeup);
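
Design note on the final hunk: for intent and write unlocks the nospin bit is cleared by subtracting __SIX_VAL(nospin, 1) from the value handed to the existing atomic64_add_return_release(), so resetting the flag costs no extra atomic operation; read unlocks leave the bit alone. The flag therefore stays set only until the current intent or write holder drops the lock, at which point optimistic spinning is allowed again.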