author		Thomas Gleixner <tglx@linutronix.de>	2020-03-18 23:48:04 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2020-05-05 11:27:50 +0200
commit		394dcdbf4d9d628f8b7da64b2b0e049954e784db (patch)
tree		08a03690c40fbeead04a60841f8830f3f4ffbd4d
parent		61210b6895950326f15088fd085e4b1ea62d82e2 (diff)
x86/kvm/svm: Move guest enter/exit into .noinstr.text	(entry-v4-part2)
Move the functions which are inside the RCU off region into the
non-instrumentable text section.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
-rw-r--r--	arch/x86/kvm/svm/svm.c		102
-rw-r--r--	arch/x86/kvm/svm/vmenter.S	2
2 files changed, 57 insertions, 47 deletions
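Background for the patch below: code that runs while RCU is not watching must not be traced or instrumented, so such functions are annotated noinstr, which collects them in a dedicated .noinstr.text section that objtool can then validate. The kernel's real macro lives in include/linux/compiler_types.h and carries additional pieces (notrace, KASAN/KCSAN opt-outs) that have changed over time; the standalone sketch below only illustrates the section-placement idea and is not the kernel definition.

/*
 * Illustrative sketch only: approximates what the kernel's noinstr
 * annotation does (no inlining, no instrumentation hooks, placement into
 * the .noinstr.text section).  The real macro differs in detail.
 */
#define my_noinstr \
	__attribute__((__noinline__, __no_instrument_function__, \
		       __section__(".noinstr.text")))

/*
 * A function marked this way lands in .noinstr.text instead of .text, so
 * tooling can check that it does not call into instrumentable code.
 */
static my_noinstr void guest_transition_stub(void)
{
	/* ... code that must not be traced or instrumented ... */
}

int main(void)
{
	guest_transition_stub();
	return 0;
}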
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 29a13bf2442f..e68de114b832 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3278,6 +3278,61 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
+static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ struct vcpu_svm *svm)
+{
+ /*
+ * VMENTER enables interrupts (host state), but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about
+ * it. This is the same logic as for exit_to_user_mode().
+ *
+ * 1) Trace interrupts on state
+ * 2) Prepare lockdep with RCU on
+ * 3) Invoke context tracking if enabled to adjust RCU state
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * This has to be after x86_spec_ctrl_set_guest() because that can
+ * take locks (lockdep needs RCU) and calls into world and some
+ * more.
+ */
+ instr_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instr_end();
+ guest_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+
+ __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+
+#ifdef CONFIG_X86_64
+ native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+ loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+ loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
+ /*
+ * VMEXIT disables interrupts (host state, see the CLI in the ASM
+ * above), but tracing and lockdep have them in state 'on'. Same as
+ * enter_from_user_mode().
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * This needs to be done before the below as native_read_msr()
+ * contains a tracepoint and x86_spec_ctrl_restore_host() calls
+ * into world and some more.
+ */
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_exit_irqoff();
+ instr_begin();
+ trace_hardirqs_off_prepare();
+ instr_end();
+}
+
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3330,52 +3385,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
*/
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
- /*
- * VMENTER enables interrupts (host state), but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about
- * it. This is the same logic as for exit_to_user_mode().
- *
- * 1) Trace interrupts on state
- * 2) Prepare lockdep with RCU on
- * 3) Invoke context tracking if enabled to adjust RCU state
- * 4) Tell lockdep that interrupts are enabled
- *
- * This has to be after x86_spec_ctrl_set_guest() because that can
- * take locks (lockdep needs RCU) and calls into world and some
- * more.
- */
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- guest_enter_irqoff();
- lockdep_hardirqs_on(CALLER_ADDR0);
-
- __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
-
-#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
- loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
- loadsegment(gs, svm->host.gs);
-#endif
-#endif
-
- /*
- * VMEXIT disables interrupts (host state, see the CLI in the ASM
- * above), but tracing and lockdep have them in state 'on'. Same as
- * enter_from_user_mode().
- *
- * 1) Tell lockdep that interrupts are disabled
- * 2) Invoke context tracking if enabled to reactivate RCU
- * 3) Trace interrupts off state
- *
- * This needs to be done before the below as native_read_msr()
- * contains a tracepoint and x86_spec_ctrl_restore_host() calls
- * into world and some more.
- */
- lockdep_hardirqs_off(CALLER_ADDR0);
- guest_exit_irqoff();
- trace_hardirqs_off_prepare();
+ svm_vcpu_enter_exit(vcpu, svm);
/*
* We do not use IBRS in the kernel. If this vCPU has used the
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index bf944334003a..1ec1ac40e328 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -27,7 +27,7 @@
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
#endif
- .text
+.section .noinstr.text, "ax"
/**
* __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
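Taken together, the two hunks follow one pattern: the assembly entry point and the new C helper both live in .noinstr.text, and any call into instrumentable code (tracepoints, lockdep) from the helper is bracketed by instr_begin()/instr_end(). The toy, compilable sketch below only illustrates that shape; enter_exit_guest(), trace_state_change() and run_guest() are made-up names, and instr_begin()/instr_end() are stubbed rather than the kernel's objtool annotations.

/*
 * Toy illustration of the .noinstr.text bracketing pattern used by
 * svm_vcpu_enter_exit() above.  Not kernel code.
 */
#include <stdio.h>

#define noinstr __attribute__((__noinline__, __section__(".noinstr.text")))

static inline void instr_begin(void) { /* objtool marker in the kernel */ }
static inline void instr_end(void)   { /* objtool marker in the kernel */ }

/* Hypothetical stand-ins for trace_hardirqs_*() and __svm_vcpu_run(). */
static void trace_state_change(void) { puts("instrumentable tracing"); }
static noinstr void run_guest(void)  { /* non-instrumentable transition */ }

static noinstr void enter_exit_guest(void)
{
	/* Tracing/lockdep work is only legal between the markers. */
	instr_begin();
	trace_state_change();
	instr_end();

	run_guest();		/* must stay free of instrumentation */

	instr_begin();
	trace_state_change();
	instr_end();
}

int main(void)
{
	enter_exit_guest();
	return 0;
}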