author    Brian Gerst <brgerst@gmail.com>    2025-03-03 11:52:40 -0500
committer Ingo Molnar <mingo@kernel.org>     2025-03-04 20:30:33 +0100
commit    839be1619fb897364b782997bc2c5d072d7a79f2 (patch)
tree      a3ea3d4c9ae140c16ae1c476d03c04db687eb25a
parent    01c7bc5198e9ef5628fad0346c5cdba2633a79da (diff)
x86/retbleed: Move call depth to percpu hot section
No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-6-brgerst@gmail.com
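For context, the patch below swaps the call depth counter's addressing mode: the asm macros previously reached the counter as pcpu_hot plus an offset constant generated by asm-offsets.c, and afterwards they address a dedicated cache-hot percpu symbol directly. The standalone C sketch that follows only illustrates that difference in access pattern; the struct layout is trimmed and the plain global is a stand-in for the real percpu variable, not kernel code:

/* Illustrative sketch only -- not the kernel implementation. It contrasts
 * the old access pattern (a field inside struct pcpu_hot, located via a
 * constant generated by asm-offsets.c) with the new one (a standalone
 * symbol the assembler can reference directly).
 */
#include <stddef.h>
#include <stdio.h>

struct pcpu_hot_old {                    /* trimmed old per-CPU layout */
	void *current_task;
	unsigned long long call_depth;   /* the field removed by this patch */
	unsigned long top_of_stack;
};

/* Old scheme: asm used "pcpu_hot + X86_call_depth", with X86_call_depth
 * emitted by asm-offsets.c as the field offset. */
#define X86_call_depth offsetof(struct pcpu_hot_old, call_depth)

/* New scheme: a dedicated symbol, so asm can reference
 * PER_CPU_VAR(__x86_call_depth) with no extra offset. This plain global
 * only stands in for the real percpu variable. */
static unsigned long long __x86_call_depth;

int main(void)
{
	struct pcpu_hot_old hot = {0};

	/* old: base address of the hot struct plus the generated offset */
	*(unsigned long long *)((char *)&hot + X86_call_depth) = -1ULL;

	/* new: a direct store to the dedicated symbol */
	__x86_call_depth = -1ULL;

	printf("old: %llx, new: %llx\n", hot.call_depth, __x86_call_depth);
	return 0;
}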
-rw-r--r--  arch/x86/include/asm/current.h        |  3
-rw-r--r--  arch/x86/include/asm/nospec-branch.h  | 11
-rw-r--r--  arch/x86/kernel/asm-offsets.c         |  3
-rw-r--r--  arch/x86/kernel/cpu/common.c          |  8
-rw-r--r--  arch/x86/lib/retpoline.S              |  2
5 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index f988462d8b69..8ba2c0f8bcaf 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -14,9 +14,6 @@ struct task_struct;
struct pcpu_hot {
struct task_struct *current_task;
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
- u64 call_depth;
-#endif
unsigned long top_of_stack;
void *hardirq_stack_ptr;
u16 softirq_pending;
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index aee26bb8230f..44c6076fd22b 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -12,7 +12,6 @@
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
-#include <asm/current.h>
/*
* Call depth tracking for Intel SKL CPUs to address the RSB underflow
@@ -78,21 +77,21 @@
#include <asm/asm-offsets.h>
#define CREDIT_CALL_DEPTH \
- movq $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);
+ movq $-1, PER_CPU_VAR(__x86_call_depth);
#define RESET_CALL_DEPTH \
xor %eax, %eax; \
bts $63, %rax; \
- movq %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);
+ movq %rax, PER_CPU_VAR(__x86_call_depth);
#define RESET_CALL_DEPTH_FROM_CALL \
movb $0xfc, %al; \
shl $56, %rax; \
- movq %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth); \
+ movq %rax, PER_CPU_VAR(__x86_call_depth); \
CALL_THUNKS_DEBUG_INC_CALLS
#define INCREMENT_CALL_DEPTH \
- sarq $5, PER_CPU_VAR(pcpu_hot + X86_call_depth); \
+ sarq $5, PER_CPU_VAR(__x86_call_depth); \
CALL_THUNKS_DEBUG_INC_CALLS
#else
@@ -387,6 +386,8 @@ extern void call_depth_return_thunk(void);
__stringify(INCREMENT_CALL_DEPTH), \
X86_FEATURE_CALL_DEPTH)
+DECLARE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
+
#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index a98020bf31bb..6fae88f8ae1e 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -109,9 +109,6 @@ static void __used common(void)
OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
OFFSET(X86_top_of_stack, pcpu_hot, top_of_stack);
OFFSET(X86_current_task, pcpu_hot, current_task);
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
- OFFSET(X86_call_depth, pcpu_hot, call_depth);
-#endif
#if IS_ENABLED(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64)
/* Offset for fields in aria_ctx */
BLANK();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a9d61537d4a6..fd224ae57d62 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2075,6 +2075,14 @@ DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
#ifdef CONFIG_X86_64
+/*
+ * Note: Do not make this dependent on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
+ * so that this space is reserved in the hot cache section even when the
+ * mitigation is disabled.
+ */
+DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
+EXPORT_PER_CPU_SYMBOL(__x86_call_depth);
+
static void wrmsrl_cstar(unsigned long val)
{
/*
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 038f49a43ab4..a26c43abd47d 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -343,7 +343,7 @@ SYM_FUNC_START(call_depth_return_thunk)
* case.
*/
CALL_THUNKS_DEBUG_INC_RETS
- shlq $5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
+ shlq $5, PER_CPU_VAR(__x86_call_depth)
jz 1f
ANNOTATE_UNRET_SAFE
ret
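As a side note on the accounting the retpoline.S hunk touches: each call does sarq $5 on the counter (the arithmetic shift drags five more set bits in from the top) and each return does shlq $5 (shifting five out); when the shift leaves zero, the thunk falls through and refills the RSB instead of returning directly. Below is a rough C model of that mechanism, illustrative only, using an ordinary global in place of the __x86_call_depth percpu variable and assuming arithmetic right shift of signed values (true on mainstream compilers):

#include <stdint.h>
#include <stdio.h>

static uint64_t depth;                 /* stand-in for __x86_call_depth */

/* RESET_CALL_DEPTH: xor %eax,%eax; bts $63,%rax -> only the top bit set */
static void reset(void)    { depth = 1ULL << 63; }

/* INCREMENT_CALL_DEPTH: sarq $5 -- every tracked call pulls five more
 * set bits in from the sign bit. */
static void on_call(void)  { depth = (uint64_t)((int64_t)depth >> 5); }

/* Return thunk: shlq $5; jz -- once returns have consumed all the
 * accounted-for calls, the value hits zero and the RSB gets stuffed. */
static int on_return(void) { depth <<= 5; return depth == 0; }

int main(void)
{
	reset();
	for (int i = 0; i < 3; i++)
		on_call();

	for (int i = 1; i <= 4; i++)
		printf("return %d -> %s\n", i,
		       on_return() ? "counter hit zero: stuff the RSB"
		                   : "plain ret");
	return 0;
}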