 arch/x86/kvm/vmx/main.c        |  2 +-
 arch/x86/kvm/vmx/posted_intr.c |  3 ++-
 arch/x86/kvm/vmx/tdx.c         | 39 +++++++++++++++++++++++++++++++++++----
 arch/x86/kvm/vmx/tdx.h         |  7 +++++++
 arch/x86/kvm/vmx/tdx_arch.h    | 11 +++++++++++
 5 files changed, 56 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 67d51925b2f6..8d6a8ce58b69 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -418,7 +418,7 @@ static void vt_cancel_injection(struct kvm_vcpu *vcpu)
 static int vt_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	if (is_td_vcpu(vcpu))
-		return true;
+		return tdx_interrupt_allowed(vcpu);
 
 	return vmx_interrupt_allowed(vcpu, for_injection);
 }
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 895bbe85b818..f2ca37b3f606 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -203,7 +203,8 @@ void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
 		return;
 
 	if (kvm_vcpu_is_blocking(vcpu) &&
-	    (is_td_vcpu(vcpu) || !vmx_interrupt_blocked(vcpu)))
+	    ((is_td_vcpu(vcpu) && tdx_interrupt_allowed(vcpu)) ||
+	     (!is_td_vcpu(vcpu) && !vmx_interrupt_blocked(vcpu))))
 		pi_enable_wakeup_handler(vcpu);
 
 	/*
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index f0e11c24b4e6..8ce69831ee53 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -726,9 +726,39 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	local_irq_enable();
 }
 
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * KVM can't get the interrupt status of TDX guest and it assumes
+	 * interrupt is always allowed unless TDX guest calls TDVMCALL with HLT,
+	 * which passes the interrupt blocked flag.
+	 */
+	return vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
+	       !to_tdx(vcpu)->vp_enter_args.r12;
+}
+
 bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
-	return pi_has_pending_interrupt(vcpu);
+	u64 vcpu_state_details;
+
+	if (pi_has_pending_interrupt(vcpu))
+		return true;
+
+	/*
+	 * Only check RVI pending for HALTED case with IRQ enabled.
+	 * For non-HLT cases, KVM doesn't care about STI/SS shadows. And if the
+	 * interrupt was pending before TD exit, then it _must_ be blocked,
+	 * otherwise the interrupt would have been serviced at the instruction
+	 * boundary.
+	 */
+	if (vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
+	    to_tdx(vcpu)->vp_enter_args.r12)
+		return false;
+
+	vcpu_state_details =
+		td_state_non_arch_read64(to_tdx(vcpu), TD_VCPU_STATE_DETAILS_NON_ARCH);
+
+	return tdx_vcpu_state_details_intr_pending(vcpu_state_details);
 }
 
 /*
@@ -845,6 +875,7 @@ static __always_inline u32 tdcall_to_vmx_exit_reason(struct kvm_vcpu *vcpu)
 {
 	switch (tdvmcall_leaf(vcpu)) {
 	case EXIT_REASON_CPUID:
+	case EXIT_REASON_HLT:
 	case EXIT_REASON_IO_INSTRUCTION:
 		return tdvmcall_leaf(vcpu);
 	case EXIT_REASON_EPT_VIOLATION:
@@ -1129,9 +1160,7 @@ static int tdx_complete_vmcall_map_gpa(struct kvm_vcpu *vcpu)
 	/*
 	 * Stop processing the remaining part if there is a pending interrupt,
 	 * which could be qualified to deliver. Skip checking pending RVI for
-	 * TDVMCALL_MAP_GPA.
-	 * TODO: Add a comment to link the reason when the target function is
-	 * implemented.
+	 * TDVMCALL_MAP_GPA, see comments in tdx_protected_apic_has_interrupt().
 	 */
 	if (kvm_vcpu_has_events(vcpu)) {
 		tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_RETRY);
@@ -1934,6 +1963,8 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
 		return 1;
 	case EXIT_REASON_CPUID:
 		return tdx_emulate_cpuid(vcpu);
+	case EXIT_REASON_HLT:
+		return kvm_emulate_halt_noskip(vcpu);
 	case EXIT_REASON_TDCALL:
 		return handle_tdvmcall(vcpu);
 	case EXIT_REASON_VMCALL:
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index 591fc09f3a8a..51983743b375 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -123,6 +123,7 @@ static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
 }
 
 static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
+static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}
 
 #define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass) \
 static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx, \
@@ -170,11 +171,15 @@ static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
 		tdh_vp_wr_failed(tdx, #uclass, " &= ~", field, bit, err);\
 }
 
+
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
+
 TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
 TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
 TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);
 
 TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
+TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
 
 #else
 static inline int tdx_bringup(void) { return 0; }
@@ -190,6 +195,8 @@ struct vcpu_tdx {
 	struct kvm_vcpu vcpu;
 };
 
+static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
+
 #endif
 
 #endif
diff --git a/arch/x86/kvm/vmx/tdx_arch.h b/arch/x86/kvm/vmx/tdx_arch.h
index 6cf68ea6965b..a30e880849e3 100644
--- a/arch/x86/kvm/vmx/tdx_arch.h
+++ b/arch/x86/kvm/vmx/tdx_arch.h
@@ -37,6 +37,17 @@ enum tdx_tdcs_execution_control {
 	TD_TDCS_EXEC_TSC_MULTIPLIER = 11,
 };
 
+enum tdx_vcpu_guest_other_state {
+	TD_VCPU_STATE_DETAILS_NON_ARCH = 0x100,
+};
+
+#define TDX_VCPU_STATE_DETAILS_INTR_PENDING	BIT_ULL(0)
+
+static inline bool tdx_vcpu_state_details_intr_pending(u64 vcpu_state_details)
+{
+	return !!(vcpu_state_details & TDX_VCPU_STATE_DETAILS_INTR_PENDING);
+}
+
 /* @field is any of enum tdx_tdcs_execution_control */
 #define TDCS_EXEC(field)	BUILD_TDX_FIELD(TD_CLASS_EXECUTION_CONTROLS, (field))
 
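
For illustration only: a minimal standalone userspace sketch of the decision logic this patch adds, not kernel code. The struct and helper names below are hypothetical stand-ins; only the rule itself comes from the diff above: a TDVMCALL<HLT> exit whose R12 argument is non-zero means the guest halted with interrupts blocked, so no interrupt can be delivered, and pending RVI is only worth checking for an interruptible HLT.

/* Build with: cc -o tdx_hlt_sketch tdx_hlt_sketch.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXIT_REASON_HLT	12	/* VMX basic exit reason for HLT */

/* Hypothetical stand-in for the bits of vCPU state the kernel code reads. */
struct tdx_exit_state {
	uint32_t basic_exit_reason;	/* vmx_get_exit_reason(vcpu).basic */
	uint64_t r12;			/* vp_enter_args.r12: interrupt-blocked flag */
	bool pir_pending;		/* pi_has_pending_interrupt(vcpu) */
	bool rvi_pending;		/* TDX_VCPU_STATE_DETAILS_INTR_PENDING bit */
};

/* Mirrors tdx_interrupt_allowed(): anything but HLT, or HLT with R12 == 0. */
static bool interrupt_allowed(const struct tdx_exit_state *e)
{
	return e->basic_exit_reason != EXIT_REASON_HLT || !e->r12;
}

/*
 * Mirrors tdx_protected_apic_has_interrupt(): posted interrupts first, then
 * RVI, but only for an HLT exit with interrupts enabled.
 */
static bool protected_apic_has_interrupt(const struct tdx_exit_state *e)
{
	if (e->pir_pending)
		return true;

	if (e->basic_exit_reason != EXIT_REASON_HLT || e->r12)
		return false;

	return e->rvi_pending;
}

int main(void)
{
	/* Guest halted with interrupts enabled and an interrupt already pending. */
	struct tdx_exit_state halted = {
		.basic_exit_reason = EXIT_REASON_HLT,
		.r12 = 0,
		.rvi_pending = true,
	};

	printf("interrupt allowed: %d, pending interrupt: %d\n",
	       interrupt_allowed(&halted),
	       protected_apic_has_interrupt(&halted));
	return 0;
}

The same split shows up in vmx_vcpu_pi_put() above: for a blocking TDX vCPU the posted-interrupt wakeup handler is only armed when tdx_interrupt_allowed() says an interrupt could actually wake the guest.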