author | Kumar Kartikeya Dwivedi <memxor@gmail.com> | 2025-03-15 21:05:29 -0700
committer | Alexei Starovoitov <ast@kernel.org> | 2025-03-19 08:03:05 -0700
commit | c9102a68c070134ade5c941d7315481a77bcea53 (patch)
tree | 264d6bf0f79d4a80e07e48fe686ed28d52c1d8f6
parent | 31158ad02ddbed2b0672c9701a0a2f3e5b3bc01a (diff)
rqspinlock: Add a test-and-set fallback
Include a test-and-set fallback for use when queued spinlock support is not
available. Introduce an rqspinlock type to act as the lock type for this
fallback when qspinlock support is absent.
Include ifdef guards to ensure the slow path in this file is only
compiled when CONFIG_QUEUED_SPINLOCKS=y. Subsequent patches will add
further logic to ensure fallback to the test-and-set implementation
when queued spinlock support is unavailable on an architecture.
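For illustration only (this patch does not add it; the call-site wiring that
picks between the two arrives in the subsequent patches), a compile-time
selection on CONFIG_QUEUED_SPINLOCKS could look roughly like the sketch
below, where res_spin_lock() is a hypothetical helper name:

  /*
   * Hypothetical sketch, not part of this patch: dispatch on
   * CONFIG_QUEUED_SPINLOCKS at compile time.
   */
  static __always_inline int res_spin_lock(rqspinlock_t *lock)
  {
  #ifdef CONFIG_QUEUED_SPINLOCKS
          int val = 0;

          /* Uncontended fast path, otherwise take the queued slow path. */
          if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, 1)))
                  return 0;
          return resilient_queued_spin_lock_slowpath(lock, val);
  #else
          /* No qspinlock support: fall back to the test-and-set variant. */
          return resilient_tas_spin_lock(lock);
  #endif
  }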
Unlike the other waiting loops in the rqspinlock code, the test-and-set loop
has no theoretical upper bound under contention, so a longer timeout than
usual is needed. Bump it up to one second in this case.
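As a rough, self-contained illustration of the pattern only (a userspace
analogue using C11 atomics and clock_gettime(), not the kernel helpers this
patch uses), a test-and-set loop with a deadline boils down to:

  #include <stdatomic.h>
  #include <time.h>

  /* One second, mirroring the timeout chosen for the fallback. */
  #define TAS_TIMEOUT_NS 1000000000ULL

  static unsigned long long now_ns(void)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
  }

  /* Spin on a test-and-set lock; give up once the deadline has passed. */
  static int tas_lock_timeout(atomic_int *lock)
  {
          unsigned long long deadline = now_ns() + TAS_TIMEOUT_NS;
          int expected = 0;

          while (!atomic_compare_exchange_weak(lock, &expected, 1)) {
                  expected = 0;
                  if (now_ns() > deadline)
                          return -1;      /* timed out */
          }
          return 0;
  }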
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-14-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r-- | include/asm-generic/rqspinlock.h | 17
-rw-r--r-- | kernel/bpf/rqspinlock.c | 46
2 files changed, 61 insertions(+), 2 deletions(-)
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 34c3dcb4299e..12f72c4a97cd 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -12,11 +12,28 @@
 #include <linux/types.h>
 #include <vdso/time64.h>
 #include <linux/percpu.h>
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#endif
+
+struct rqspinlock {
+	union {
+		atomic_t val;
+		u32 locked;
+	};
+};
 
 struct qspinlock;
+#ifdef CONFIG_QUEUED_SPINLOCKS
 typedef struct qspinlock rqspinlock_t;
+#else
+typedef struct rqspinlock rqspinlock_t;
+#endif
 
+extern int resilient_tas_spin_lock(rqspinlock_t *lock);
+#ifdef CONFIG_QUEUED_SPINLOCKS
 extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);
+#endif
 
 /*
  * Default timeout for waiting loops is 0.25 seconds
diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index bddbcc47d38f..714dfab5caa8 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -21,7 +21,9 @@
 #include <linux/mutex.h>
 #include <linux/prefetch.h>
 #include <asm/byteorder.h>
+#ifdef CONFIG_QUEUED_SPINLOCKS
 #include <asm/qspinlock.h>
+#endif
 #include <trace/events/lock.h>
 #include <asm/rqspinlock.h>
 #include <linux/timekeeping.h>
@@ -29,9 +31,12 @@
 /*
  * Include queued spinlock definitions and statistics code
  */
+#ifdef CONFIG_QUEUED_SPINLOCKS
 #include "../locking/qspinlock.h"
 #include "../locking/lock_events.h"
 #include "rqspinlock.h"
+#include "../locking/mcs_spinlock.h"
+#endif
 
 /*
  * The basic principle of a queue-based spinlock can best be understood
@@ -70,8 +75,6 @@
  *
  */
 
-#include "../locking/mcs_spinlock.h"
-
 struct rqspinlock_timeout {
 	u64 timeout_end;
 	u64 duration;
@@ -264,6 +267,43 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
 #define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; })
 
 /*
+ * Provide a test-and-set fallback for cases when queued spin lock support is
+ * absent from the architecture.
+ */
+int __lockfunc resilient_tas_spin_lock(rqspinlock_t *lock)
+{
+	struct rqspinlock_timeout ts;
+	int val, ret = 0;
+
+	RES_INIT_TIMEOUT(ts);
+	grab_held_lock_entry(lock);
+
+	/*
+	 * Since the waiting loop's time is dependent on the amount of
+	 * contention, a short timeout unlike rqspinlock waiting loops
+	 * isn't enough. Choose a second as the timeout value.
+	 */
+	RES_RESET_TIMEOUT(ts, NSEC_PER_SEC);
+retry:
+	val = atomic_read(&lock->val);
+
+	if (val || !atomic_try_cmpxchg(&lock->val, &val, 1)) {
+		if (RES_CHECK_TIMEOUT(ts, ret, ~0u))
+			goto out;
+		cpu_relax();
+		goto retry;
+	}
+
+	return 0;
+out:
+	release_held_lock_entry();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(resilient_tas_spin_lock);
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+/*
  * Per-CPU queue node structures; we can never have more than 4 nested
  * contexts: task, softirq, hardirq, nmi.
  *
@@ -616,3 +656,5 @@ err_release_entry:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
+
+#endif /* CONFIG_QUEUED_SPINLOCKS */