KVM: TDX: restore user ret MSRs

Several MSRs are clobbered on TD exit that are not used by Linux while
in ring 0.  Ensure the cached value of each MSR is updated on vcpu_put,
and that the MSRs themselves are restored before returning to ring 3.

Co-developed-by: Tony Lindgren <tony.lindgren@linux.intel.com>
Signed-off-by: Tony Lindgren <tony.lindgren@linux.intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20250129095902.16391-10-adrian.hunter@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Isaku Yamahata
2025-01-29 11:58:58 +02:00
committed by Paolo Bonzini
parent d3a6b6cfb8
commit e0b4f31a3c
2 changed files with 51 additions and 1 deletions

View File

@@ -651,9 +651,32 @@ void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
vt->guest_state_loaded = true;
}
/*
 * Descriptor for a user-return MSR that the TDX module clobbers on TD exit.
 */
struct tdx_uret_msr {
	u32 msr;		/* MSR index, e.g. MSR_LSTAR */
	unsigned int slot;	/* slot from kvm_find_user_return_msr(), filled in at __tdx_bringup() */
	u64 defval;		/* value the MSR holds after TD exit; cached via kvm_user_return_msr_update_cache() */
};
/*
 * MSRs clobbered on TD exit whose caches must be refreshed before
 * returning to userspace.  defval of 0 means the TDX module leaves the
 * MSR zeroed; the non-zero MSR_SYSCALL_MASK value is presumably the
 * TDX-module-defined post-exit value — TODO confirm against the TDX spec.
 */
static struct tdx_uret_msr tdx_uret_msrs[] = {
	{.msr = MSR_SYSCALL_MASK, .defval = 0x20200 },
	{.msr = MSR_STAR,},
	{.msr = MSR_LSTAR,},
	{.msr = MSR_TSC_AUX,},
};
static void tdx_user_return_msr_update_cache(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
kvm_user_return_msr_update_cache(tdx_uret_msrs[i].slot,
tdx_uret_msrs[i].defval);
}
static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
{
struct vcpu_vt *vt = to_vt(vcpu);
struct vcpu_tdx *tdx = to_tdx(vcpu);
if (!vt->guest_state_loaded)
return;
@@ -661,6 +684,11 @@ static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
++vcpu->stat.host_state_reload;
wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
if (tdx->guest_entered) {
tdx_user_return_msr_update_cache();
tdx->guest_entered = false;
}
vt->guest_state_loaded = false;
}
@@ -767,6 +795,8 @@ EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
{
struct vcpu_tdx *tdx = to_tdx(vcpu);
/*
 * force_immediate_exit requires vCPU entering for events injection with
 * an immediate exit to follow. But the TDX module doesn't guarantee
@@ -782,6 +812,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
tdx_vcpu_enter_exit(vcpu);
tdx_load_host_xsave_state(vcpu);
tdx->guest_entered = true;
vcpu->arch.regs_avail &= TDX_REGS_AVAIL_SET;
@@ -2242,7 +2273,25 @@ static int __init __do_tdx_bringup(void)
static int __init __tdx_bringup(void)
{
const struct tdx_sys_info_td_conf *td_conf;
int r;
int r, i;
for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++) {
/*
* Check if MSRs (tdx_uret_msrs) can be saved/restored
* before returning to user space.
*
* this_cpu_ptr(user_return_msrs)->registered isn't checked
* because the registration is done at vcpu runtime by
* tdx_user_return_msr_update_cache().
*/
tdx_uret_msrs[i].slot = kvm_find_user_return_msr(tdx_uret_msrs[i].msr);
if (tdx_uret_msrs[i].slot == -1) {
/* If any MSR isn't supported, it is a KVM bug */
pr_err("MSR %x isn't included by kvm_find_user_return_msr\n",
tdx_uret_msrs[i].msr);
return -EIO;
}
}
/*
* Enabling TDX requires enabling hardware virtualization first,

View File

@@ -57,6 +57,7 @@ struct vcpu_tdx {
u64 vp_enter_ret;
enum vcpu_tdx_state state;
bool guest_entered;
};
void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err);