mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
KVM: VMX: Handle #MCs on VM-Enter/TD-Enter outside of the fastpath
Handle Machine Checks (#MC) that happen on VM-Enter (VMX or TDX) outside of KVM's fastpath so that as much host state as possible is re-loaded before invoking the kernel's #MC handler. The only requirement is that KVM invokes the #MC handler before enabling IRQs (and even that could _probably_ be relaxed to handling #MCs before enabling preemption). Waiting to handle #MCs until "more" host state is loaded hardens KVM against flaws in the #MC handler, which has historically been quite brittle. E.g. prior to commit 5567d11c21 ("x86/mce: Send #MC singal from task work"), the #MC code could trigger a schedule() with IRQs and preemption disabled. That led to a KVM hack-a-fix in commit 1811d979c7 ("x86/kvm: move kvm_load/put_guest_xcr0 into atomic context"). Note, vmx_handle_exit_irqoff() is common to VMX and TDX guests. Cc: Tony Lindgren <tony.lindgren@linux.intel.com> Cc: Rick Edgecombe <rick.p.edgecombe@intel.com> Cc: Jon Kohler <jon@nutanix.com> Reviewed-by: Tony Lindgren <tony.lindgren@linux.intel.com> Link: https://patch.msgid.link/20251118222328.2265758-3-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
@@ -1063,9 +1063,6 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
|
||||
if (unlikely((tdx->vp_enter_ret & TDX_SW_ERROR) == TDX_SW_ERROR))
|
||||
return EXIT_FASTPATH_NONE;
|
||||
|
||||
if (unlikely(vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MCE_DURING_VMENTRY))
|
||||
kvm_machine_check();
|
||||
|
||||
trace_kvm_exit(vcpu, KVM_ISA_VMX);
|
||||
|
||||
if (unlikely(tdx_failed_vmentry(vcpu)))
|
||||
|
||||
@@ -7074,10 +7074,19 @@ void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
|
||||
if (to_vt(vcpu)->emulation_required)
|
||||
return;
|
||||
|
||||
if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXTERNAL_INTERRUPT)
|
||||
switch (vmx_get_exit_reason(vcpu).basic) {
|
||||
case EXIT_REASON_EXTERNAL_INTERRUPT:
|
||||
handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
|
||||
else if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXCEPTION_NMI)
|
||||
break;
|
||||
case EXIT_REASON_EXCEPTION_NMI:
|
||||
handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
|
||||
break;
|
||||
case EXIT_REASON_MCE_DURING_VMENTRY:
|
||||
kvm_machine_check();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -7526,9 +7535,6 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
|
||||
if (unlikely(vmx->fail))
|
||||
return EXIT_FASTPATH_NONE;
|
||||
|
||||
if (unlikely((u16)vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MCE_DURING_VMENTRY))
|
||||
kvm_machine_check();
|
||||
|
||||
trace_kvm_exit(vcpu, KVM_ISA_VMX);
|
||||
|
||||
if (unlikely(vmx_get_exit_reason(vcpu).failed_vmentry))
|
||||
|
||||
Reference in New Issue
Block a user