Merge tag 'kvm-x86-misc-6.19' of https://github.com/kvm-x86/linux into HEAD

KVM x86 misc changes for 6.19:

 - Fix an async #PF bug where KVM would clear the completion queue when the
   guest transitioned in and out of paging mode, e.g. when handling an SMI and
   then returning to paged mode via RSM.

 - Fix a bug where TDX would effectively corrupt user-return MSR values if the
   TDX Module rejects VP.ENTER and thus doesn't clobber host MSRs as expected.

 - Leave the user-return notifier that's used to restore host MSRs registered
   when disabling virtualization, and instead pin kvm.ko.  Restoring host MSRs
   via IPI callback is either pointless (clean reboot) or dangerous (forced
   reboot), as KVM has no idea what code it's interrupting (a toy sketch of the
   user-return MSR mechanism follows this list).

 - Use the checked versions of {get,put}_user(), as Linus wants to kill off the
   unchecked __{get,put}_user() variants, and the checked versions are
   measurably faster on modern CPUs since the unchecked versions contain an
   LFENCE.

 - Fix a long-lurking bug where KVM's lack of catch-up logic for periodic APIC
   timers can result in a hard lockup in the host.

 - Revert the periodic kvmclock sync logic now that KVM doesn't use a
   clocksource that's subject to NTP corrections.

 - Clean up KVM's handling of MMIO Stale Data and L1TF, and bury the latter
   behind CONFIG_CPU_MITIGATIONS.

 - Context switch XCR0, XSS, and PKRU outside of the entry/exit fastpath as
   the only reason they were handled in the fastpath was to paper over a bug in
   the core #MC code that has long since been fixed.

 - Add emulator support for AVX MOV instructions to play nice with emulated
   devices whose PCI BARs guest drivers like to access with large multi-byte
   instructions.
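
As a rough illustration of the user-return MSR mechanism referenced above (a
toy standalone model, not the kernel implementation; the real code is
kvm_set_user_return_msr() and kvm_on_user_return() in the x86.c hunks further
down): KVM tracks a host value and the currently-loaded value per MSR slot,
skips the MSR write when the requested guest-biased value is already loaded,
and relies on the user-return notifier to restore the host values before the
CPU returns to userspace.  All names below (struct uret_msr, fake_wrmsr, etc.)
are hypothetical stand-ins.

#include <stdio.h>
#include <stdint.h>

struct uret_msr {
    uint64_t host;  /* value the host expects on return to userspace */
    uint64_t curr;  /* value currently loaded in the hardware MSR    */
};

/* Stand-in for wrmsr(); just logs so the flow is visible. */
static void fake_wrmsr(int slot, uint64_t val)
{
    printf("wrmsr slot %d <- %#llx\n", slot, (unsigned long long)val);
}

/* Mirrors kvm_set_user_return_msr(): merge with the host value via @mask
 * and skip the (expensive) MSR write when nothing would change. */
static void set_user_return_msr(struct uret_msr *m, int slot,
                                uint64_t value, uint64_t mask)
{
    value = (value & mask) | (m->host & ~mask);
    if (value == m->curr)
        return;
    fake_wrmsr(slot, value);
    m->curr = value;
}

/* Mirrors kvm_on_user_return(): restore host values that were clobbered. */
static void on_user_return(struct uret_msr *msrs, int nr)
{
    for (int i = 0; i < nr; i++) {
        if (msrs[i].curr != msrs[i].host) {
            fake_wrmsr(i, msrs[i].host);
            msrs[i].curr = msrs[i].host;
        }
    }
}

int main(void)
{
    struct uret_msr msrs[1] = { { .host = 0x10, .curr = 0x10 } };

    set_user_return_msr(&msrs[0], 0, 0xabc, -1ull); /* guest value loaded  */
    set_user_return_msr(&msrs[0], 0, 0xabc, -1ull); /* no-op, value cached */
    on_user_return(msrs, 1);                        /* host value restored */
    return 0;
}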
Paolo Bonzini, 2025-11-26 09:34:21 +01:00
24 changed files with 702 additions and 496 deletions


@@ -499,6 +499,11 @@
#define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
#define X86_FEATURE_ABMC (21*32+15) /* Assignable Bandwidth Monitoring Counters */
#define X86_FEATURE_MSR_IMM (21*32+16) /* MSR immediate form instructions */
#define X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO (21*32+17) /*
* Clear CPU buffers before VM-Enter if the vCPU
* can access host MMIO (ignored for all intents
* and purposes if CLEAR_CPU_BUF_VM is set).
*/
/*
* BUG word(s)


@@ -5,7 +5,7 @@
#include <linux/threads.h>
typedef struct {
#if IS_ENABLED(CONFIG_KVM_INTEL)
#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
u8 kvm_cpu_l1tf_flush_l1d;
#endif
unsigned int __nmi_count; /* arch dependent */
@@ -68,7 +68,7 @@ extern u64 arch_irq_stat(void);
DECLARE_PER_CPU_CACHE_HOT(u16, __softirq_pending);
#define local_softirq_pending_ref __softirq_pending
#if IS_ENABLED(CONFIG_KVM_INTEL)
#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
/*
* This function is called from noinstr interrupt contexts
* and must be inlined to not get instrumentation.


@@ -1055,9 +1055,6 @@ struct kvm_vcpu_arch {
/* be preempted when it's in kernel-mode(cpl=0) */
bool preempted_in_kernel;
/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
bool l1tf_flush_l1d;
/* Host CPU on which VM-entry was most recently attempted */
int last_vmentry_cpu;
@@ -1456,8 +1453,6 @@ struct kvm_arch {
bool use_master_clock;
u64 master_kernel_ns;
u64 master_cycle_now;
struct delayed_work kvmclock_update_work;
struct delayed_work kvmclock_sync_work;
#ifdef CONFIG_KVM_HYPERV
struct kvm_hv hyperv;
@@ -2167,6 +2162,7 @@ void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu,
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa);
void kvm_prepare_unexpected_reason_exit(struct kvm_vcpu *vcpu, u64 exit_reason);
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
@@ -2378,7 +2374,6 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
int kvm_add_user_return_msr(u32 msr);
int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
void kvm_user_return_msr_update_cache(unsigned int index, u64 val);
u64 kvm_get_user_return_msr(unsigned int slot);
static inline bool kvm_is_supported_user_return_msr(u32 msr)


@@ -308,24 +308,26 @@
* CFLAGS.ZF.
* Note: Only the memory operand variant of VERW clears the CPU buffers.
*/
.macro __CLEAR_CPU_BUFFERS feature
#ifdef CONFIG_X86_64
ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
#define VERW verw x86_verw_sel(%rip)
#else
/*
* In 32bit mode, the memory operand must be a %cs reference. The data
* segments may not be usable (vm86 mode), and the stack segment may not
* be flat (ESPFIX32).
*/
ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
/*
* In 32bit mode, the memory operand must be a %cs reference. The data segments
* may not be usable (vm86 mode), and the stack segment may not be flat (ESPFIX32).
*/
#define VERW verw %cs:x86_verw_sel
#endif
.endm
/*
* Provide a stringified VERW macro for simple usage, and a non-stringified
* VERW macro for use in more elaborate sequences, e.g. to encode a conditional
* VERW within an ALTERNATIVE.
*/
#define __CLEAR_CPU_BUFFERS __stringify(VERW)
/* If necessary, emit VERW on exit-to-userspace to clear CPU buffers. */
#define CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
#define VM_CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF
#ifdef CONFIG_X86_64
.macro CLEAR_BRANCH_HISTORY
@@ -580,8 +582,6 @@ DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
extern u16 x86_verw_sel;
#include <asm/segment.h>


@@ -192,14 +192,6 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
*/
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
/*
* Controls CPU Fill buffer clear before VMenter. This is a subset of
* X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
* mitigation is required.
*/
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
#undef pr_fmt
#define pr_fmt(fmt) "mitigations: " fmt
@@ -489,8 +481,8 @@ static enum rfds_mitigations rfds_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
/*
* Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
* through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
* Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing on exit to
* userspace *and* on entry to KVM guests.
*/
static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
@@ -536,6 +528,7 @@ static void __init mds_apply_mitigation(void)
if (mds_mitigation == MDS_MITIGATION_FULL ||
mds_mitigation == MDS_MITIGATION_VMWERV) {
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
(mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
cpu_smt_disable(false);
@@ -647,6 +640,7 @@ static void __init taa_apply_mitigation(void)
* present on host, enable the mitigation for UCODE_NEEDED as well.
*/
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
cpu_smt_disable(false);
@@ -748,9 +742,9 @@ static void __init mmio_apply_mitigation(void)
*/
if (verw_clear_cpu_buf_mitigation_selected) {
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
static_branch_disable(&cpu_buf_vm_clear);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
} else {
static_branch_enable(&cpu_buf_vm_clear);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO);
}
/*
@@ -839,8 +833,10 @@ static void __init rfds_update_mitigation(void)
static void __init rfds_apply_mitigation(void)
{
if (rfds_mitigation == RFDS_MITIGATION_VERW)
if (rfds_mitigation == RFDS_MITIGATION_VERW) {
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
}
}
static __init int rfds_parse_cmdline(char *str)


@@ -81,9 +81,8 @@
*/
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ByteOp (1<<0) /* 8-bit operands. */
#define DstShift 1 /* Destination operand type at bits 1-5 */
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
@@ -95,8 +94,7 @@
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcShift 6 /* Source operand type at bits 6-10 */
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
@@ -119,10 +117,10 @@
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define GroupMask (7<<15) /* Group mechanisms, at bits 15-17 */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
@@ -131,11 +129,8 @@
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
#define ModRM (1<<19) /* Generic ModRM decode. */
#define Mov (1<<20) /* Destination is only written; never read. */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
@@ -143,11 +138,11 @@
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define No64 (1<<28) /* Instruction generates #UD in 64-bit mode */
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Avx ((u64)1 << 31) /* Instruction uses VEX prefix */
#define Src2Shift (32) /* Source 2 operand type at bits 32-36 */
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
@@ -161,12 +156,13 @@
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
/* free: 37-39 */
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define AlignMask ((u64)7 << 41)
#define AlignMask ((u64)3 << 41) /* Memory alignment requirement at bits 41-42 */
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
#define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Aligned16 ((u64)3 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
/* free: 43-44 */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
@@ -243,6 +239,13 @@ enum x86_transfer_type {
X86_TRANSFER_TASK_SWITCH,
};
enum rex_bits {
REX_B = 1,
REX_X = 2,
REX_R = 4,
REX_W = 8,
};
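
As a quick sanity check on the mapping above (standalone illustration, not
kernel code): a REX byte of 0x4c encodes 0100WRXB = 0100 1100, so rex_bits
comes out as 0xc and the REX_W and REX_R tests used by the decoder evaluate
true while REX_X and REX_B do not.

#include <stdio.h>

enum rex_bits_demo { REX_B = 1, REX_X = 2, REX_R = 4, REX_W = 8 };

int main(void)
{
    unsigned char rex = 0x4c;           /* REX.WR prefix           */
    unsigned char rex_bits = rex & 0xf; /* as in x86_decode_insn() */

    printf("W=%d R=%d X=%d B=%d\n",
           !!(rex_bits & REX_W), !!(rex_bits & REX_R),
           !!(rex_bits & REX_X), !!(rex_bits & REX_B)); /* 1 1 0 0 */
    return 0;
}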
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
unsigned long dirty = ctxt->regs_dirty;
@@ -622,7 +625,6 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
switch (alignment) {
case Unaligned:
case Avx:
return 1;
case Aligned16:
return 16;
@@ -924,7 +926,7 @@ static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
int byteop)
{
void *p;
int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
int highbyte_regs = (ctxt->rex_prefix == REX_NONE) && byteop;
if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
@@ -1030,6 +1032,7 @@ static void fetch_register_operand(struct operand *op)
op->val = *(u64 *)op->addr.reg;
break;
}
op->orig_val = op->val;
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
@@ -1075,17 +1078,17 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
static void __decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op, int reg)
{
unsigned int reg;
if (ctxt->d & ModRM)
reg = ctxt->modrm_reg;
else
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
if ((ctxt->d & Avx) && ctxt->op_bytes == 32) {
op->type = OP_YMM;
op->bytes = 32;
op->addr.xmm = reg;
kvm_read_avx_reg(reg, &op->vec_val2);
return;
}
if (ctxt->d & (Avx|Sse)) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
@@ -1103,9 +1106,20 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
fetch_register_operand(op);
op->orig_val = op->val;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
unsigned int reg;
if (ctxt->d & ModRM)
reg = ctxt->modrm_reg;
else
reg = (ctxt->b & 7) | (ctxt->rex_bits & REX_B ? 8 : 0);
__decode_register_operand(ctxt, op, reg);
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
@@ -1122,9 +1136,9 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
ctxt->modrm_reg = (ctxt->rex_bits & REX_R ? 8 : 0);
index_reg = (ctxt->rex_bits & REX_X ? 8 : 0);
base_reg = (ctxt->rex_bits & REX_B ? 8 : 0);
ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
@@ -1132,24 +1146,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt->d & ByteOp);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
return rc;
}
if (ctxt->d & Mmx) {
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = ctxt->modrm_rm & 7;
return rc;
}
fetch_register_operand(op);
__decode_register_operand(ctxt, op, ctxt->modrm_rm);
return rc;
}
@@ -1783,7 +1780,15 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
op->data,
op->bytes * op->count);
case OP_XMM:
kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
if (!(ctxt->d & Avx)) {
kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
break;
}
/* full YMM write but with high bytes cleared */
memset(op->valptr + 16, 0, 16);
fallthrough;
case OP_YMM:
kvm_write_avx_reg(op->addr.xmm, &op->vec_val2);
break;
case OP_MM:
kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
@@ -2466,7 +2471,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
setup_syscalls_segments(&cs, &ss);
if ((ctxt->rex_prefix & 0x8) != 0x0)
if (ctxt->rex_bits & REX_W)
usermode = X86EMUL_MODE_PROT64;
else
usermode = X86EMUL_MODE_PROT32;
@@ -3958,6 +3963,8 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode ud = I(SrcNone, emulate_ud);
static const struct opcode group7_rm0[] = {
N,
I(SrcNone | Priv | EmulateOnUD, em_hypercall),
@@ -4114,7 +4121,7 @@ static const struct group_dual group15 = { {
} };
static const struct gprefix pfx_0f_6f_0f_7f = {
I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
I(Mmx, em_mov), I(Sse | Avx | Aligned, em_mov), N, I(Sse | Avx | Unaligned, em_mov),
};
static const struct instr_dual instr_dual_0f_2b = {
@@ -4133,8 +4140,8 @@ static const struct gprefix pfx_0f_28_0f_29 = {
I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};
static const struct gprefix pfx_0f_e7 = {
N, I(Sse, em_mov), N, N,
static const struct gprefix pfx_0f_e7_0f_38_2a = {
N, I(Sse | Avx, em_mov), N, N,
};
static const struct escape escape_d9 = { {
@@ -4347,8 +4354,8 @@ static const struct opcode twobyte_table[256] = {
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
/* 0x10 - 0x1F */
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
GP(ModRM | DstReg | SrcMem | Mov | Sse | Avx, &pfx_0f_10_0f_11),
GP(ModRM | DstMem | SrcReg | Mov | Sse | Avx, &pfx_0f_10_0f_11),
N, N, N, N, N, N,
D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
@@ -4364,9 +4371,9 @@ static const struct opcode twobyte_table[256] = {
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
check_dr_write),
N, N, N, N,
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
GP(ModRM | DstReg | SrcMem | Mov | Sse | Avx, &pfx_0f_28_0f_29),
GP(ModRM | DstMem | SrcReg | Mov | Sse | Avx, &pfx_0f_28_0f_29),
N, GP(ModRM | DstMem | SrcReg | Mov | Sse | Avx, &pfx_0f_2b),
N, N, N, N,
/* 0x30 - 0x3F */
II(ImplicitOps | Priv, em_wrmsr, wrmsr),
@@ -4431,7 +4438,7 @@ static const struct opcode twobyte_table[256] = {
/* 0xD0 - 0xDF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0xE0 - 0xEF */
N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7_0f_38_2a),
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xFF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
@@ -4458,8 +4465,13 @@ static const struct gprefix three_byte_0f_38_f1 = {
* byte.
*/
static const struct opcode opcode_map_0f_38[256] = {
/* 0x00 - 0x7f */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0x00 - 0x1f */
X16(N), X16(N),
/* 0x20 - 0x2f */
X8(N),
X2(N), GP(SrcReg | DstMem | ModRM | Mov | Aligned, &pfx_0f_e7_0f_38_2a), N, N, N, N, N,
/* 0x30 - 0x7f */
X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0x80 - 0xef */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0xf0 - 0xf1 */
@@ -4618,14 +4630,12 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccLo:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccHi:
if (ctxt->d & ByteOp) {
@@ -4636,7 +4646,6 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->bytes = ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpDI:
op->type = OP_MEM;
@@ -4755,12 +4764,87 @@ done:
return rc;
}
static int x86_decode_avx(struct x86_emulate_ctxt *ctxt,
u8 vex_1st, u8 vex_2nd, struct opcode *opcode)
{
u8 vex_3rd, map, pp, l, v;
int rc = X86EMUL_CONTINUE;
if (ctxt->rep_prefix || ctxt->op_prefix || ctxt->rex_prefix)
goto ud;
if (vex_1st == 0xc5) {
/* Expand RVVVVlpp to VEX3 format */
vex_3rd = vex_2nd & ~0x80; /* VVVVlpp from VEX2, w=0 */
vex_2nd = (vex_2nd & 0x80) | 0x61; /* R from VEX2, X=1 B=1 mmmmm=00001 */
} else {
vex_3rd = insn_fetch(u8, ctxt);
}
/* vex_2nd = RXBmmmmm, vex_3rd = wVVVVlpp. Fix polarity */
vex_2nd ^= 0xE0; /* binary 11100000 */
vex_3rd ^= 0x78; /* binary 01111000 */
ctxt->rex_prefix = REX_PREFIX;
ctxt->rex_bits = (vex_2nd & 0xE0) >> 5; /* RXB */
ctxt->rex_bits |= (vex_3rd & 0x80) >> 4; /* w */
if (ctxt->rex_bits && ctxt->mode != X86EMUL_MODE_PROT64)
goto ud;
map = vex_2nd & 0x1f;
v = (vex_3rd >> 3) & 0xf;
l = vex_3rd & 0x4;
pp = vex_3rd & 0x3;
ctxt->b = insn_fetch(u8, ctxt);
switch (map) {
case 1:
ctxt->opcode_len = 2;
*opcode = twobyte_table[ctxt->b];
break;
case 2:
ctxt->opcode_len = 3;
*opcode = opcode_map_0f_38[ctxt->b];
break;
case 3:
/* no 0f 3a instructions are supported yet */
return X86EMUL_UNHANDLEABLE;
default:
goto ud;
}
/*
* No three operand instructions are supported yet; those that
* *are* marked with the Avx flag reserve the VVVV flag.
*/
if (v)
goto ud;
if (l)
ctxt->op_bytes = 32;
else
ctxt->op_bytes = 16;
switch (pp) {
case 0: break;
case 1: ctxt->op_prefix = true; break;
case 2: ctxt->rep_prefix = 0xf3; break;
case 3: ctxt->rep_prefix = 0xf2; break;
}
done:
return rc;
ud:
*opcode = ud;
return rc;
}
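
As a worked example of the decode path above (purely illustrative, standalone
code rather than kernel code): assume the guest executes
vmovdqu %ymm0,(%rdi), encoded as c5 fe 7f 07.  The VEX2 byte 0xfe expands and
polarity-flips to map 1 (the two-byte 0F table), vvvv = 0, L = 1 (256-bit) and
pp = 2 (implied F3 prefix), so opcode 0x7f lands on the
I(Sse | Avx | Unaligned, em_mov) entry of pfx_0f_6f_0f_7f.

#include <stdio.h>

int main(void)
{
    unsigned int vex_2nd = 0xfe, vex_3rd;

    /* Expand the two-byte VEX form (c5 fe) to the three-byte layout. */
    vex_3rd = vex_2nd & ~0x80u;         /* wVVVVLpp, w = 0        */
    vex_2nd = (vex_2nd & 0x80) | 0x61;  /* RXBmmmmm, map = 1 (0F) */

    /* Undo the inverted polarity of R/X/B, w and vvvv. */
    vex_2nd ^= 0xe0;
    vex_3rd ^= 0x78;

    printf("rex_bits = %u\n", ((vex_2nd & 0xe0) >> 5) | ((vex_3rd & 0x80) >> 4)); /* 0 */
    printf("map      = %u\n", vex_2nd & 0x1f);       /* 1 -> twobyte_table        */
    printf("vvvv     = %u\n", (vex_3rd >> 3) & 0xf); /* 0 -> no third operand     */
    printf("L        = %u\n", !!(vex_3rd & 0x4));    /* 1 -> 256-bit, op_bytes=32 */
    printf("pp       = %u\n", vex_3rd & 0x3);        /* 2 -> implied F3 prefix    */

    /*
     * Opcode byte 0x7f with the implied F3 prefix selects the
     * I(Sse | Avx | Unaligned, em_mov) entry of pfx_0f_6f_0f_7f, and
     * ModRM 0x07 is mod=0, reg=ymm0, rm=(%rdi).
     */
    return 0;
}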
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool vex_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
u16 dummy;
@@ -4812,7 +4896,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
ctxt->op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
@@ -4851,7 +4935,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
ctxt->rex_prefix = REX_PREFIX;
ctxt->rex_bits = ctxt->b & 0xf;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
@@ -4865,20 +4950,33 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
ctxt->rex_prefix = REX_NONE;
ctxt->rex_bits = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
if (ctxt->rex_bits & REX_W)
ctxt->op_bytes = 8;
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
if (ctxt->b == 0xc4 || ctxt->b == 0xc5) {
/* VEX or LDS/LES */
u8 vex_2nd = insn_fetch(u8, ctxt);
if (mode != X86EMUL_MODE_PROT64 && (vex_2nd & 0xc0) != 0xc0) {
opcode = opcode_table[ctxt->b];
ctxt->modrm = vex_2nd;
/* the Mod/RM byte has been fetched already! */
goto done_modrm;
}
vex_prefix = true;
rc = x86_decode_avx(ctxt, ctxt->b, vex_2nd, &opcode);
if (rc != X86EMUL_CONTINUE)
goto done;
} else if (ctxt->b == 0x0f) {
/* Two- or three-byte opcode */
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
@@ -4889,18 +4987,16 @@ done_prefixes:
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
} else {
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
if (opcode.flags & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
done_modrm:
ctxt->d = opcode.flags;
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
@@ -4919,9 +5015,9 @@ done_prefixes:
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
if (ctxt->rep_prefix && ctxt->op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
simd_prefix = ctxt->op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
@@ -4966,6 +5062,19 @@ done_prefixes:
if (ctxt->d == 0)
return EMULATION_FAILED;
if (unlikely(vex_prefix)) {
/*
* Only specifically marked instructions support VEX. Since many
* instructions support it but are not annotated, return not implemented
* rather than #UD.
*/
if (!(ctxt->d & Avx))
return EMULATION_FAILED;
if (!(ctxt->d & AlignMask))
ctxt->d |= Unaligned;
}
ctxt->execute = opcode.u.execute;
/*
@@ -5036,8 +5145,10 @@ done_prefixes:
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
if (vex_prefix)
;
else if (ctxt->d & Sse)
ctxt->op_bytes = 16, ctxt->d &= ~Avx;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
@@ -5137,8 +5248,10 @@ void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
/* Clear fields that are set conditionally but read without a guard. */
ctxt->rip_relative = false;
ctxt->rex_prefix = 0;
ctxt->rex_prefix = REX_NONE;
ctxt->rex_bits = 0;
ctxt->lock_prefix = 0;
ctxt->op_prefix = false;
ctxt->rep_prefix = 0;
ctxt->regs_valid = 0;
ctxt->regs_dirty = 0;
@@ -5168,20 +5281,34 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts)
}
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
(No64|Undefined|Avx|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
(ctxt->d & Undefined)) {
rc = emulate_ud(ctxt);
goto done;
}
if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
|| ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
if ((ctxt->d & (Avx|Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
if (ctxt->d & Avx) {
u64 xcr = 0;
if (!(ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE)
|| ops->get_xcr(ctxt, 0, &xcr)
|| !(xcr & XFEATURE_MASK_YMM)) {
rc = emulate_ud(ctxt);
goto done;
}
} else if (ctxt->d & Sse) {
if (!(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)) {
rc = emulate_ud(ctxt);
goto done;
}
}
if ((ctxt->d & (Avx|Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}


@@ -15,6 +15,58 @@ typedef u32 __attribute__((vector_size(16))) sse128_t;
#define sse128_l3(x) ({ __sse128_u t; t.vec = x; t.as_u32[3]; })
#define sse128(lo, hi) ({ __sse128_u t; t.as_u64[0] = lo; t.as_u64[1] = hi; t.vec; })
typedef u32 __attribute__((vector_size(32))) avx256_t;
static inline void _kvm_read_avx_reg(int reg, avx256_t *data)
{
switch (reg) {
case 0: asm("vmovdqa %%ymm0, %0" : "=m"(*data)); break;
case 1: asm("vmovdqa %%ymm1, %0" : "=m"(*data)); break;
case 2: asm("vmovdqa %%ymm2, %0" : "=m"(*data)); break;
case 3: asm("vmovdqa %%ymm3, %0" : "=m"(*data)); break;
case 4: asm("vmovdqa %%ymm4, %0" : "=m"(*data)); break;
case 5: asm("vmovdqa %%ymm5, %0" : "=m"(*data)); break;
case 6: asm("vmovdqa %%ymm6, %0" : "=m"(*data)); break;
case 7: asm("vmovdqa %%ymm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("vmovdqa %%ymm8, %0" : "=m"(*data)); break;
case 9: asm("vmovdqa %%ymm9, %0" : "=m"(*data)); break;
case 10: asm("vmovdqa %%ymm10, %0" : "=m"(*data)); break;
case 11: asm("vmovdqa %%ymm11, %0" : "=m"(*data)); break;
case 12: asm("vmovdqa %%ymm12, %0" : "=m"(*data)); break;
case 13: asm("vmovdqa %%ymm13, %0" : "=m"(*data)); break;
case 14: asm("vmovdqa %%ymm14, %0" : "=m"(*data)); break;
case 15: asm("vmovdqa %%ymm15, %0" : "=m"(*data)); break;
#endif
default: BUG();
}
}
static inline void _kvm_write_avx_reg(int reg, const avx256_t *data)
{
switch (reg) {
case 0: asm("vmovdqa %0, %%ymm0" : : "m"(*data)); break;
case 1: asm("vmovdqa %0, %%ymm1" : : "m"(*data)); break;
case 2: asm("vmovdqa %0, %%ymm2" : : "m"(*data)); break;
case 3: asm("vmovdqa %0, %%ymm3" : : "m"(*data)); break;
case 4: asm("vmovdqa %0, %%ymm4" : : "m"(*data)); break;
case 5: asm("vmovdqa %0, %%ymm5" : : "m"(*data)); break;
case 6: asm("vmovdqa %0, %%ymm6" : : "m"(*data)); break;
case 7: asm("vmovdqa %0, %%ymm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("vmovdqa %0, %%ymm8" : : "m"(*data)); break;
case 9: asm("vmovdqa %0, %%ymm9" : : "m"(*data)); break;
case 10: asm("vmovdqa %0, %%ymm10" : : "m"(*data)); break;
case 11: asm("vmovdqa %0, %%ymm11" : : "m"(*data)); break;
case 12: asm("vmovdqa %0, %%ymm12" : : "m"(*data)); break;
case 13: asm("vmovdqa %0, %%ymm13" : : "m"(*data)); break;
case 14: asm("vmovdqa %0, %%ymm14" : : "m"(*data)); break;
case 15: asm("vmovdqa %0, %%ymm15" : : "m"(*data)); break;
#endif
default: BUG();
}
}
static inline void _kvm_read_sse_reg(int reg, sse128_t *data)
{
switch (reg) {
@@ -109,6 +161,20 @@ static inline void kvm_fpu_put(void)
fpregs_unlock();
}
static inline void kvm_read_avx_reg(int reg, avx256_t *data)
{
kvm_fpu_get();
_kvm_read_avx_reg(reg, data);
kvm_fpu_put();
}
static inline void kvm_write_avx_reg(int reg, const avx256_t *data)
{
kvm_fpu_get();
_kvm_write_avx_reg(reg, data);
kvm_fpu_put();
}
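
A minimal standalone sketch of the same load/store pattern (userspace x86-64,
assuming AVX is available and a compiler with GNU vector extensions; it
mirrors the helpers above rather than calling them).  vmovdqa faults on a
misaligned memory operand, which is presumably why the avx256_t buffers, and
the struct operand value union in the kvm_emulate.h hunk below, carry 32-byte
alignment.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t avx256_t __attribute__((vector_size(32)));

int main(void)
{
    /* Vector types get natural 32-byte alignment, as vmovdqa requires. */
    avx256_t val = { 1, 2, 3, 4, 5, 6, 7, 8 };

    /* Round-trip through %ymm0, mirroring the helpers above.  No clobber
     * list, matching the kernel helpers; nothing else here uses %ymm0. */
    asm volatile("vmovdqa %0, %%ymm0" : : "m"(val));
    asm volatile("vmovdqa %%ymm0, %0" : "=m"(val));

    printf("%u ... %u\n", (unsigned)val[0], (unsigned)val[7]);
    return 0;
}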
static inline void kvm_read_sse_reg(int reg, sse128_t *data)
{
kvm_fpu_get();


@@ -1568,7 +1568,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
* only, there can be valuable data in the rest which needs
* to be preserved e.g. on migration.
*/
if (__put_user(0, (u32 __user *)addr))
if (put_user(0, (u32 __user *)addr))
return 1;
hv_vcpu->hv_vapic = data;
kvm_vcpu_mark_page_dirty(vcpu, gfn);


@@ -237,6 +237,7 @@ struct x86_emulate_ops {
bool (*is_smm)(struct x86_emulate_ctxt *ctxt);
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*get_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 *xcr);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
@@ -248,7 +249,7 @@ struct x86_emulate_ops {
/* Type, address-of, and value of an instruction's operand. */
struct operand {
enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_YMM, OP_MM, OP_NONE } type;
unsigned int bytes;
unsigned int count;
union {
@@ -267,11 +268,12 @@ struct operand {
union {
unsigned long val;
u64 val64;
char valptr[sizeof(sse128_t)];
char valptr[sizeof(avx256_t)];
sse128_t vec_val;
avx256_t vec_val2;
u64 mm_val;
void *data;
};
} __aligned(32);
};
#define X86_MAX_INSTRUCTION_LENGTH 15
@@ -317,6 +319,14 @@ typedef void (*fastop_t)(struct fastop *);
#define NR_EMULATOR_GPRS 8
#endif
/*
* Distinguish between no prefix, REX, or in the future REX2.
*/
enum rex_type {
REX_NONE,
REX_PREFIX,
};
struct x86_emulate_ctxt {
void *vcpu;
const struct x86_emulate_ops *ops;
@@ -348,6 +358,7 @@ struct x86_emulate_ctxt {
u8 opcode_len;
u8 b;
u8 intercept;
bool op_prefix;
u8 op_bytes;
u8 ad_bytes;
union {
@@ -357,7 +368,8 @@ struct x86_emulate_ctxt {
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
bool rip_relative;
u8 rex_prefix;
enum rex_type rex_prefix;
u8 rex_bits;
u8 lock_prefix;
u8 rep_prefix;
/* bitmaps of registers in _regs[] that can be read */


@@ -2126,23 +2126,41 @@ static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
struct kvm_timer *ktimer = &apic->lapic_timer;
ktime_t now = ktime_get();
u64 tscl = rdtsc();
ktime_t delta;
/*
* Synchronize both deadlines to the same time source or
* differences in the periods (caused by differences in the
* underlying clocks or numerical approximation errors) will
* cause the two to drift apart over time as the errors
* accumulate.
* Use kernel time as the time source for both the hrtimer deadline and
* TSC-based deadline so that they stay synchronized. Computing each
* deadline independently will cause the two deadlines to drift apart
* over time as differences in the periods accumulate, e.g. due to
* differences in the underlying clocks or numerical approximation errors.
*/
apic->lapic_timer.target_expiration =
ktime_add_ns(apic->lapic_timer.target_expiration,
apic->lapic_timer.period);
delta = ktime_sub(apic->lapic_timer.target_expiration, now);
apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
nsec_to_cycles(apic->vcpu, delta);
ktimer->target_expiration = ktime_add_ns(ktimer->target_expiration,
ktimer->period);
/*
* If the new expiration is in the past, e.g. because userspace stopped
* running the VM for an extended duration, then force the expiration
* to "now" and don't try to play catch-up with the missed events. KVM
* will only deliver a single interrupt regardless of how many events
* are pending, i.e. restarting the timer with an expiration in the
* past will do nothing more than waste host cycles, and can even lead
* to a hard lockup in extreme cases.
*/
if (ktime_before(ktimer->target_expiration, now))
ktimer->target_expiration = now;
/*
* Note, ensuring the expiration isn't in the past also prevents delta
* from going negative, which could cause the TSC deadline to become
* excessively large due to it being an unsigned value.
*/
delta = ktime_sub(ktimer->target_expiration, now);
ktimer->tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
nsec_to_cycles(apic->vcpu, delta);
}
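
To make the failure mode concrete (hypothetical numbers, standalone
illustration rather than kernel code): with a 100 us period and a vCPU that
wasn't run for 10 seconds, re-arming strictly one period at a time would
produce roughly 100,000 back-to-back expirations, each firing immediately,
whereas the clamp above collapses that to a single expiration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* All times in nanoseconds; purely illustrative values. */
    int64_t period = 100 * 1000;              /* 100 us periodic timer */
    int64_t now = 10LL * 1000 * 1000 * 1000;  /* vCPU idle for 10 s    */
    int64_t expiration = 0;
    long fired = 0;

    /* Old behavior: advance by one period per callback and re-arm. */
    while (expiration < now) {
        expiration += period;
        fired++;              /* ~100,000 immediate re-expirations */
    }
    printf("without clamping: %ld immediate expirations\n", fired);

    /* New behavior: one expiration, then snap the deadline forward. */
    expiration = period;
    if (expiration < now)
        expiration = now;
    printf("with clamping: 1 expiration, next deadline at now\n");
    return 0;
}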
static void start_sw_period(struct kvm_lapic *apic)
@@ -2970,9 +2988,9 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
apic_timer_expired(apic, true);
if (lapic_is_periodic(apic)) {
if (lapic_is_periodic(apic) && !WARN_ON_ONCE(!apic->lapic_timer.period)) {
advance_periodic_target_expiration(apic);
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
hrtimer_set_expires(&ktimer->timer, ktimer->target_expiration);
return HRTIMER_RESTART;
} else
return HRTIMER_NORESTART;


@@ -235,8 +235,6 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return -(u32)fault & errcode;
}
bool kvm_mmu_may_ignore_guest_pat(struct kvm *kvm);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);


@@ -4859,7 +4859,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
*/
BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
vcpu->arch.l1tf_flush_l1d = true;
kvm_request_l1tf_flush_l1d();
if (!flags) {
trace_kvm_page_fault(vcpu, fault_address, error_code);


@@ -402,7 +402,7 @@ retry_walk:
goto error;
ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
if (unlikely(__get_user(pte, ptep_user)))
if (unlikely(get_user(pte, ptep_user)))
goto error;
walker->ptep_user[walker->level - 1] = ptep_user;


@@ -292,7 +292,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
}
if (static_branch_unlikely(&cpu_buf_vm_clear) &&
if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) &&
!kvm_vcpu_can_access_host_mmio(vcpu) &&
kvm_is_mmio_pfn(pfn, &is_host_mmio))
kvm_track_host_mmio_mapping(vcpu);


@@ -3442,13 +3442,8 @@ static bool svm_check_exit_valid(u64 exit_code)
static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
{
vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
dump_vmcb(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.data[0] = exit_code;
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
kvm_prepare_unexpected_reason_exit(vcpu, exit_code);
return 0;
}
@@ -4251,7 +4246,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
clgi();
kvm_load_guest_xsave_state(vcpu);
/*
* Hardware only context switches DEBUGCTL if LBR virtualization is
@@ -4294,7 +4288,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
update_debugctlmsr(vcpu->arch.host_debugctl);
kvm_load_host_xsave_state(vcpu);
stgi();
/* Any pending NMI will happen here */
@@ -4326,14 +4319,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
/*
* We need to handle MC intercepts here before the vcpu has a chance to
* change the physical cpu
*/
if (unlikely(svm->vmcb->control.exit_code ==
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(vcpu);
trace_kvm_exit(vcpu, KVM_ISA_SVM);
svm_complete_interrupts(vcpu);
@@ -4622,8 +4607,16 @@ out:
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
switch (to_svm(vcpu)->vmcb->control.exit_code) {
case SVM_EXIT_EXCP_BASE + MC_VECTOR:
svm_handle_mce(vcpu);
break;
case SVM_EXIT_INTR:
vcpu->arch.at_instruction_boundary = true;
break;
default:
break;
}
}
static void svm_setup_mce(struct kvm_vcpu *vcpu)


@@ -92,6 +92,8 @@
jmp 901b
.endm
#define SVM_CLEAR_CPU_BUFFERS \
ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM
/**
* __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
@@ -170,7 +172,7 @@ SYM_FUNC_START(__svm_vcpu_run)
mov VCPU_RDI(%_ASM_DI), %_ASM_DI
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
SVM_CLEAR_CPU_BUFFERS
/* Enter guest mode */
3: vmrun %_ASM_AX
@@ -339,7 +341,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
mov KVM_VMCB_pa(%rax), %rax
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
SVM_CLEAR_CPU_BUFFERS
/* Enter guest mode */
1: vmrun %rax


@@ -3880,7 +3880,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
goto vmentry_failed;
/* Hide L1D cache contents from the nested guest. */
vmx->vcpu.arch.l1tf_flush_l1d = true;
kvm_request_l1tf_flush_l1d();
/*
* Must happen outside of nested_vmx_enter_non_root_mode() as it will


@@ -2,12 +2,8 @@
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H
#define VMX_RUN_VMRESUME_SHIFT 0
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT 2
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
#define VMX_RUN_VMRESUME BIT(0)
#define VMX_RUN_SAVE_SPEC_CTRL BIT(1)
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(2)
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */


@@ -763,25 +763,6 @@ static bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
return tdx_vcpu_state_details_intr_pending(vcpu_state_details);
}
/*
* Compared to vmx_prepare_switch_to_guest(), there is not much to do
* as SEAMCALL/SEAMRET calls take care of most of save and restore.
*/
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_vt *vt = to_vt(vcpu);
if (vt->guest_state_loaded)
return;
if (likely(is_64bit_mm(current->mm)))
vt->msr_host_kernel_gs_base = current->thread.gsbase;
else
vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
vt->guest_state_loaded = true;
}
struct tdx_uret_msr {
u32 msr;
unsigned int slot;
@@ -795,19 +776,38 @@ static struct tdx_uret_msr tdx_uret_msrs[] = {
{.msr = MSR_TSC_AUX,},
};
static void tdx_user_return_msr_update_cache(void)
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_vt *vt = to_vt(vcpu);
int i;
if (vt->guest_state_loaded)
return;
if (likely(is_64bit_mm(current->mm)))
vt->msr_host_kernel_gs_base = current->thread.gsbase;
else
vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
vt->guest_state_loaded = true;
/*
* Explicitly set user-return MSRs that are clobbered by the TDX-Module
* if VP.ENTER succeeds, i.e. on TD-Exit, with the values that would be
* written by the TDX-Module. Don't rely on the TDX-Module to actually
* clobber the MSRs, as the contract is poorly defined and not upheld.
* E.g. the TDX-Module will synthesize an EPT Violation without doing
* VM-Enter if it suspects a zero-step attack, and never "restore" VMM
* state.
*/
for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
kvm_user_return_msr_update_cache(tdx_uret_msrs[i].slot,
tdx_uret_msrs[i].defval);
kvm_set_user_return_msr(tdx_uret_msrs[i].slot,
tdx_uret_msrs[i].defval, -1ull);
}
static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
{
struct vcpu_vt *vt = to_vt(vcpu);
struct vcpu_tdx *tdx = to_tdx(vcpu);
if (!vt->guest_state_loaded)
return;
@@ -815,11 +815,6 @@ static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
++vcpu->stat.host_state_reload;
wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
if (tdx->guest_entered) {
tdx_user_return_msr_update_cache();
tdx->guest_entered = false;
}
vt->guest_state_loaded = false;
}
@@ -1059,7 +1054,6 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
update_debugctlmsr(vcpu->arch.host_debugctl);
tdx_load_host_xsave_state(vcpu);
tdx->guest_entered = true;
vcpu->arch.regs_avail &= TDX_REGS_AVAIL_SET;
@@ -1069,9 +1063,6 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
if (unlikely((tdx->vp_enter_ret & TDX_SW_ERROR) == TDX_SW_ERROR))
return EXIT_FASTPATH_NONE;
if (unlikely(vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MCE_DURING_VMENTRY))
kvm_machine_check();
trace_kvm_exit(vcpu, KVM_ISA_VMX);
if (unlikely(tdx_failed_vmentry(vcpu)))
@@ -2145,11 +2136,7 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
}
unhandled_exit:
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.data[0] = vp_enter_ret;
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
kvm_prepare_unexpected_reason_exit(vcpu, vp_enter_ret);
return 0;
}
@@ -3447,10 +3434,6 @@ static int __init __tdx_bringup(void)
/*
* Check if MSRs (tdx_uret_msrs) can be saved/restored
* before returning to user space.
*
* this_cpu_ptr(user_return_msrs)->registered isn't checked
* because the registration is done at vcpu runtime by
* tdx_user_return_msr_update_cache().
*/
tdx_uret_msrs[i].slot = kvm_find_user_return_msr(tdx_uret_msrs[i].msr);
if (tdx_uret_msrs[i].slot == -1) {


@@ -67,7 +67,6 @@ struct vcpu_tdx {
u64 vp_enter_ret;
enum vcpu_tdx_state state;
bool guest_entered;
u64 map_gpa_next;
u64 map_gpa_end;


@@ -71,6 +71,7 @@
* @regs: unsigned long * (to guest registers)
* @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
* VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
* VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO: vCPU can access host MMIO
*
* Returns:
* 0 on VM-Exit, 1 on VM-Fail
@@ -92,7 +93,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Save @vmx for SPEC_CTRL handling */
push %_ASM_ARG1
/* Save @flags for SPEC_CTRL handling */
/* Save @flags (used for VMLAUNCH vs. VMRESUME and mitigations). */
push %_ASM_ARG3
/*
@@ -101,9 +102,6 @@ SYM_FUNC_START(__vmx_vcpu_run)
*/
push %_ASM_ARG2
/* Copy @flags to EBX, _ASM_ARG3 is volatile. */
mov %_ASM_ARG3L, %ebx
lea (%_ASM_SP), %_ASM_ARG2
call vmx_update_host_rsp
@@ -137,9 +135,6 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Load @regs to RAX. */
mov (%_ASM_SP), %_ASM_AX
/* Check if vmlaunch or vmresume is needed */
bt $VMX_RUN_VMRESUME_SHIFT, %ebx
/* Load guest registers. Don't clobber flags. */
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
mov VCPU_RDX(%_ASM_AX), %_ASM_DX
@@ -160,11 +155,23 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Load guest RAX. This kills the @regs pointer! */
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
/* Clobbers EFLAGS.ZF */
CLEAR_CPU_BUFFERS
/*
* Note, ALTERNATIVE_2 works in reverse order. If CLEAR_CPU_BUF_VM is
* enabled, do VERW unconditionally. If CPU_BUF_VM_MMIO is enabled,
* check @flags to see if the vCPU has access to host MMIO, and if so,
* do VERW. Else, do nothing (no mitigations needed/enabled).
*/
ALTERNATIVE_2 "", \
__stringify(testl $VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO, WORD_SIZE(%_ASM_SP); \
jz .Lskip_mmio_verw; \
VERW; \
.Lskip_mmio_verw:), \
X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO, \
__stringify(VERW), X86_FEATURE_CLEAR_CPU_BUF_VM
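
Restated in C purely for illustration (the real decision is the ALTERNATIVE_2
above, patched in at boot from the forced feature caps set in bugs.c; the
function and variable names below are hypothetical stand-ins for those caps):

#include <stdbool.h>
#include <stdio.h>

#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO (1u << 2)

static bool clear_cpu_buf_vm;       /* X86_FEATURE_CLEAR_CPU_BUF_VM      */
static bool clear_cpu_buf_vm_mmio;  /* X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO */

static void maybe_verw_before_vmenter(unsigned int flags)
{
    if (clear_cpu_buf_vm)
        puts("VERW (unconditional, MDS/TAA-style mitigation)");
    else if (clear_cpu_buf_vm_mmio &&
             (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
        puts("VERW (vCPU can access host MMIO)");
    else
        puts("no VERW needed");
}

int main(void)
{
    clear_cpu_buf_vm_mmio = true;
    maybe_verw_before_vmenter(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO);
    maybe_verw_before_vmenter(0);
    return 0;
}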
/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
jnc .Lvmlaunch
/* Check @flags to see if VMLAUNCH or VMRESUME is needed. */
testl $VMX_RUN_VMRESUME, WORD_SIZE(%_ASM_SP)
jz .Lvmlaunch
/*
* After a successful VMRESUME/VMLAUNCH, control flow "magically"


@@ -203,6 +203,7 @@ module_param(pt_mode, int, S_IRUGO);
struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
#ifdef CONFIG_CPU_MITIGATIONS
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);
@@ -225,7 +226,7 @@ static const struct {
#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
static int __vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
struct page *page;
unsigned int i;
@@ -302,6 +303,26 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
return 0;
}
static int vmx_setup_l1d_flush(void)
{
/*
* Hand the parameter mitigation value in which was stored in the pre
* module init parser. If no parameter was given, it will contain
* 'auto' which will be turned into the default 'cond' mitigation mode.
*/
return __vmx_setup_l1d_flush(vmentry_l1d_flush_param);
}
static void vmx_cleanup_l1d_flush(void)
{
if (vmx_l1d_flush_pages) {
free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
vmx_l1d_flush_pages = NULL;
}
/* Restore state so sysfs ignores VMX */
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
static int vmentry_l1d_flush_parse(const char *s)
{
unsigned int i;
@@ -339,7 +360,7 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
}
mutex_lock(&vmx_l1d_flush_mutex);
ret = vmx_setup_l1d_flush(l1tf);
ret = __vmx_setup_l1d_flush(l1tf);
mutex_unlock(&vmx_l1d_flush_mutex);
return ret;
}
@@ -352,6 +373,101 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}
/*
* Software based L1D cache flush which is used when microcode providing
* the cache control MSR is not loaded.
*
* The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
* flush it is required to read in 64 KiB because the replacement algorithm
* is not exactly LRU. This could be sized at runtime via topology
* information but as all relevant affected CPUs have 32KiB L1D cache size
* there is no point in doing so.
*/
static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
int size = PAGE_SIZE << L1D_CACHE_ORDER;
if (!static_branch_unlikely(&vmx_l1d_should_flush))
return;
/*
* This code is only executed when the flush mode is 'cond' or
* 'always'
*/
if (static_branch_likely(&vmx_l1d_flush_cond)) {
/*
* Clear the per-cpu flush bit, it gets set again if the vCPU
* is reloaded, i.e. if the vCPU is scheduled out or if KVM
* exits to userspace, or if KVM reaches one of the unsafe
* VMEXIT handlers, e.g. if KVM calls into the emulator,
* or from the interrupt handlers.
*/
if (!kvm_get_cpu_l1tf_flush_l1d())
return;
kvm_clear_cpu_l1tf_flush_l1d();
}
vcpu->stat.l1d_flush++;
if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
native_wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
return;
}
asm volatile(
/* First ensure the pages are in the TLB */
"xorl %%eax, %%eax\n"
".Lpopulate_tlb:\n\t"
"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
"addl $4096, %%eax\n\t"
"cmpl %%eax, %[size]\n\t"
"jne .Lpopulate_tlb\n\t"
"xorl %%eax, %%eax\n\t"
"cpuid\n\t"
/* Now fill the cache */
"xorl %%eax, %%eax\n"
".Lfill_cache:\n"
"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
"addl $64, %%eax\n\t"
"cmpl %%eax, %[size]\n\t"
"jne .Lfill_cache\n\t"
"lfence\n"
:: [flush_pages] "r" (vmx_l1d_flush_pages),
[size] "r" (size)
: "eax", "ebx", "ecx", "edx");
}
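
For reference, the constants above work out as follows (a trivial standalone
check, not kernel code): with L1D_CACHE_ORDER = 4 the flush buffer is
PAGE_SIZE << 4 = 64 KiB, so the TLB-populate loop runs 16 iterations at a
4096-byte stride and the cache-fill loop runs 1024 iterations at a 64-byte
stride.

#include <stdio.h>

int main(void)
{
    const int page_size = 4096, l1d_cache_order = 4; /* kernel constants */
    int size = page_size << l1d_cache_order;

    printf("flush buffer: %d KiB\n", size / 1024);        /* 64 KiB     */
    printf("TLB populate: %d iterations\n", size / 4096); /* 16 pages   */
    printf("cache fill:   %d iterations\n", size / 64);   /* 1024 lines */
    return 0;
}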
#else /* CONFIG_CPU_MITIGATIONS*/
static int vmx_setup_l1d_flush(void)
{
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NEVER;
return 0;
}
static void vmx_cleanup_l1d_flush(void)
{
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
static __always_inline void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
}
static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
pr_warn_once("Kernel compiled without mitigations, ignoring vmentry_l1d_flush\n");
return 0;
}
static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
return sysfs_emit(s, "never\n");
}
#endif
static const struct kernel_param_ops vmentry_l1d_flush_ops = {
.set = vmentry_l1d_flush_set,
.get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
{
u64 msr;
@@ -404,12 +520,6 @@ static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
vmx->disable_fb_clear = false;
}
static const struct kernel_param_ops vmentry_l1d_flush_ops = {
.set = vmentry_l1d_flush_set,
.get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
void vmx_vmexit(void);
@@ -903,7 +1013,7 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
flags |= VMX_RUN_SAVE_SPEC_CTRL;
if (static_branch_unlikely(&cpu_buf_vm_clear) &&
if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) &&
kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
@@ -6631,15 +6741,8 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
unexpected_vmexit:
vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
exit_reason.full);
dump_vmcs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.data[0] = exit_reason.full;
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
kvm_prepare_unexpected_reason_exit(vcpu, exit_reason.full);
return 0;
}
@@ -6661,77 +6764,6 @@ int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return ret;
}
/*
* Software based L1D cache flush which is used when microcode providing
* the cache control MSR is not loaded.
*
* The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
* flush it is required to read in 64 KiB because the replacement algorithm
* is not exactly LRU. This could be sized at runtime via topology
* information but as all relevant affected CPUs have 32KiB L1D cache size
* there is no point in doing so.
*/
static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
int size = PAGE_SIZE << L1D_CACHE_ORDER;
/*
* This code is only executed when the flush mode is 'cond' or
* 'always'
*/
if (static_branch_likely(&vmx_l1d_flush_cond)) {
bool flush_l1d;
/*
* Clear the per-vcpu flush bit, it gets set again if the vCPU
* is reloaded, i.e. if the vCPU is scheduled out or if KVM
* exits to userspace, or if KVM reaches one of the unsafe
* VMEXIT handlers, e.g. if KVM calls into the emulator.
*/
flush_l1d = vcpu->arch.l1tf_flush_l1d;
vcpu->arch.l1tf_flush_l1d = false;
/*
* Clear the per-cpu flush bit, it gets set again from
* the interrupt handlers.
*/
flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
kvm_clear_cpu_l1tf_flush_l1d();
if (!flush_l1d)
return;
}
vcpu->stat.l1d_flush++;
if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
native_wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
return;
}
asm volatile(
/* First ensure the pages are in the TLB */
"xorl %%eax, %%eax\n"
".Lpopulate_tlb:\n\t"
"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
"addl $4096, %%eax\n\t"
"cmpl %%eax, %[size]\n\t"
"jne .Lpopulate_tlb\n\t"
"xorl %%eax, %%eax\n\t"
"cpuid\n\t"
/* Now fill the cache */
"xorl %%eax, %%eax\n"
".Lfill_cache:\n"
"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
"addl $64, %%eax\n\t"
"cmpl %%eax, %[size]\n\t"
"jne .Lfill_cache\n\t"
"lfence\n"
:: [flush_pages] "r" (vmx_l1d_flush_pages),
[size] "r" (size)
: "eax", "ebx", "ecx", "edx");
}
void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -7050,10 +7082,19 @@ void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
if (to_vt(vcpu)->emulation_required)
return;
if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXTERNAL_INTERRUPT)
switch (vmx_get_exit_reason(vcpu).basic) {
case EXIT_REASON_EXTERNAL_INTERRUPT:
handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
else if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXCEPTION_NMI)
break;
case EXIT_REASON_EXCEPTION_NMI:
handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
break;
case EXIT_REASON_MCE_DURING_VMENTRY:
kvm_machine_check();
break;
default:
break;
}
}
/*
@@ -7328,21 +7369,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_state_enter_irqoff();
/*
* L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
* mitigation for MDS is done late in VMentry and is still
* executed in spite of L1D Flush. This is because an extra VERW
* should not matter much after the big hammer L1D Flush.
*
* cpu_buf_vm_clear is used when system is not vulnerable to MDS/TAA,
* and is affected by MMIO Stale Data. In such cases mitigation in only
* needed against an MMIO capable guest.
*/
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
(flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
x86_clear_cpu_buffers();
vmx_l1d_flush(vcpu);
vmx_disable_fb_clear(vmx);
@@ -7454,8 +7481,6 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
kvm_load_guest_xsave_state(vcpu);
pt_guest_enter(vmx);
atomic_switch_perf_msrs(vmx);
@@ -7499,8 +7524,6 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
pt_guest_exit(vmx);
kvm_load_host_xsave_state(vcpu);
if (is_guest_mode(vcpu)) {
/*
* Track VMLAUNCH/VMRESUME that have made past guest state
@@ -7516,9 +7539,6 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
if (unlikely(vmx->fail))
return EXIT_FASTPATH_NONE;
if (unlikely((u16)vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MCE_DURING_VMENTRY))
kvm_machine_check();
trace_kvm_exit(vcpu, KVM_ISA_VMX);
if (unlikely(vmx_get_exit_reason(vcpu).failed_vmentry))
@@ -8679,16 +8699,6 @@ __init int vmx_hardware_setup(void)
return r;
}
static void vmx_cleanup_l1d_flush(void)
{
if (vmx_l1d_flush_pages) {
free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
vmx_l1d_flush_pages = NULL;
}
/* Restore state so sysfs ignores VMX */
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
void vmx_exit(void)
{
allow_smaller_maxphyaddr = false;
@@ -8724,14 +8734,8 @@ int __init vmx_init(void)
if (r)
return r;
/*
* Must be called after common x86 init so enable_ept is properly set
* up. Hand the parameter mitigation value in which was stored in
* the pre module init parser. If no parameter was given, it will
* contain 'auto' which will be turned into the default 'cond'
* mitigation mode.
*/
r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
/* Must be called after common x86 init so enable_ept is setup. */
r = vmx_setup_l1d_flush();
if (r)
goto err_l1d_flush;


@@ -159,9 +159,6 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(report_ignored_msrs);
unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, 0644);
static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, 0444);
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, 0644);
@@ -212,7 +209,7 @@ struct kvm_user_return_msrs {
u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;
static DEFINE_PER_CPU(struct kvm_user_return_msrs, user_return_msrs);
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
@@ -575,24 +572,26 @@ static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
vcpu->arch.apf.gfns[i] = ~0;
}
static void kvm_destroy_user_return_msrs(void)
{
int cpu;
for_each_possible_cpu(cpu)
WARN_ON_ONCE(per_cpu(user_return_msrs, cpu).registered);
kvm_nr_uret_msrs = 0;
}
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
struct kvm_user_return_msrs *msrs
= container_of(urn, struct kvm_user_return_msrs, urn);
struct kvm_user_return_msr_values *values;
unsigned long flags;
/*
* Disabling irqs at this point since the following code could be
* interrupted and executed through kvm_arch_disable_virtualization_cpu()
*/
local_irq_save(flags);
if (msrs->registered) {
msrs->registered = false;
user_return_notifier_unregister(urn);
}
local_irq_restore(flags);
msrs->registered = false;
user_return_notifier_unregister(urn);
for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
values = &msrs->values[slot];
if (values->host != values->curr) {
@@ -643,7 +642,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_user_return_msr);
static void kvm_user_return_msr_cpu_online(void)
{
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
u64 value;
int i;
@@ -665,7 +664,7 @@ static void kvm_user_return_register_notifier(struct kvm_user_return_msrs *msrs)
int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
int err;
value = (value & mask) | (msrs->values[slot].host & ~mask);
@@ -681,24 +680,15 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_user_return_msr);
void kvm_user_return_msr_update_cache(unsigned int slot, u64 value)
{
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
msrs->values[slot].curr = value;
kvm_user_return_register_notifier(msrs);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_user_return_msr_update_cache);
u64 kvm_get_user_return_msr(unsigned int slot)
{
return this_cpu_ptr(user_return_msrs)->values[slot].curr;
return this_cpu_ptr(&user_return_msrs)->values[slot].curr;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_user_return_msr);
static void drop_user_return_notifiers(void)
{
struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
struct kvm_user_return_msrs *msrs = this_cpu_ptr(&user_return_msrs);
if (msrs->registered)
kvm_on_user_return(&msrs->urn);
@@ -1045,6 +1035,13 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_require_dr);
static bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{
u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
return (vcpu->arch.apf.msr_en_val & mask) == mask;
}
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
@@ -1137,15 +1134,20 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon
}
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
/*
* Clearing CR0.PG is defined to flush the TLB from the guest's
* perspective.
*/
if (!(cr0 & X86_CR0_PG))
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
/*
* Check for async #PF completion events when enabling paging,
* as the vCPU may have previously encountered async #PFs (it's
* entirely legal for the guest to toggle paging on/off without
* waiting for the async #PF queue to drain).
*/
else if (kvm_pv_async_pf_enabled(vcpu))
kvm_make_request(KVM_REQ_APF_READY, vcpu);
}
if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
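As a concrete illustration of the scenario described in the comment above (a sketch, not code from the file):

	/*
	 * 1. Guest takes an async #PF; KVM queues the page-in.      (CR0.PG = 1)
	 * 2. Guest temporarily disables paging by clearing CR0.PG.
	 * 3. Host finishes the page-in; a completion is now pending.
	 * 4. Guest sets CR0.PG again; KVM_REQ_APF_READY is requested above so
	 *    the pending "page ready" event is noticed rather than lost.
	 */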
@@ -1203,20 +1205,27 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw);
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
{
if (vcpu->arch.guest_state_protected)
return;
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
return;
if (vcpu->arch.xcr0 != kvm_host.xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
if (vcpu->arch.xcr0 != kvm_host.xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK,
load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);
if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != kvm_host.xss)
wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss);
}
if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != kvm_host.xss)
wrmsrq(MSR_IA32_XSS, load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
}
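Because old and new lines are interleaved in the hunk above, the consolidated helper is easier to read reassembled. Roughly, pieced together from the added lines (a sketch, not a verbatim copy of the resulting file):

static void kvm_load_xfeatures(struct kvm_vcpu *vcpu, bool load_guest)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE))
		return;

	/* Switch XCR0 between the guest and host values as needed. */
	if (vcpu->arch.xcr0 != kvm_host.xcr0)
		xsetbv(XCR_XFEATURE_ENABLED_MASK,
		       load_guest ? vcpu->arch.xcr0 : kvm_host.xcr0);

	/* Ditto for IA32_XSS, if the guest can use XSAVES. */
	if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
	    vcpu->arch.ia32_xss != kvm_host.xss)
		wrmsrq(MSR_IA32_XSS,
		       load_guest ? vcpu->arch.ia32_xss : kvm_host.xss);
}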
static void kvm_load_guest_pkru(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.guest_state_protected)
return;
if (cpu_feature_enabled(X86_FEATURE_PKU) &&
vcpu->arch.pkru != vcpu->arch.host_pkru &&
@@ -1224,9 +1233,8 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
wrpkru(vcpu->arch.pkru);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_load_guest_xsave_state);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
static void kvm_load_host_pkru(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.guest_state_protected)
return;
@@ -1238,19 +1246,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.pkru != vcpu->arch.host_pkru)
wrpkru(vcpu->arch.host_pkru);
}
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
if (vcpu->arch.xcr0 != kvm_host.xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != kvm_host.xss)
wrmsrq(MSR_IA32_XSS, kvm_host.xss);
}
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_load_host_xsave_state);
#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
@@ -3505,27 +3501,17 @@ uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
/*
* kvmclock updates which are isolated to a given vcpu, such as
* vcpu->cpu migration, should not allow system_timestamp from
* the rest of the vcpus to remain static. Otherwise ntp frequency
* correction applies to one vcpu's system_timestamp but not
* the others.
* the rest of the vcpus to remain static.
*
* So in those cases, request a kvmclock update for all vcpus.
* We need to rate-limit these requests though, as they can
* considerably slow guests that have a large number of vcpus.
* The time for a remote vcpu to update its kvmclock is bound
* by the delay we use to rate-limit the updates.
 * The worst case for a remote vcpu to update its kvmclock
 * is then bounded by the maximum nohz sleep latency.
*/
#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
static void kvmclock_update_fn(struct work_struct *work)
static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
unsigned long i;
struct delayed_work *dwork = to_delayed_work(work);
struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
kvmclock_update_work);
struct kvm *kvm = container_of(ka, struct kvm, arch);
struct kvm_vcpu *vcpu;
struct kvm *kvm = v->kvm;
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -3533,29 +3519,6 @@ static void kvmclock_update_fn(struct work_struct *work)
}
}
static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
struct kvm *kvm = v->kvm;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
schedule_delayed_work(&kvm->arch.kvmclock_update_work,
KVMCLOCK_UPDATE_DELAY);
}
#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
static void kvmclock_sync_fn(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
kvmclock_sync_work);
struct kvm *kvm = container_of(ka, struct kvm, arch);
schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
KVMCLOCK_SYNC_PERIOD);
}
/* These helpers are safe iff @msr is known to be an MCx bank MSR. */
static bool is_mci_control_msr(u32 msr)
{
@@ -3650,13 +3613,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
}
static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{
u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
return (vcpu->arch.apf.msr_en_val & mask) == mask;
}
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
@@ -4182,7 +4138,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
return 1;
if (data & 0x1) {
vcpu->arch.apf.pageready_pending = false;
/*
* Pairs with the smp_mb__after_atomic() in
* kvm_arch_async_page_present_queued().
*/
smp_store_mb(vcpu->arch.apf.pageready_pending, false);
kvm_check_async_pf_completion(vcpu);
}
break;
@@ -5188,7 +5149,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu->arch.l1tf_flush_l1d = true;
kvm_request_l1tf_flush_l1d();
if (vcpu->scheduled_out && pmu->version && pmu->event_count) {
pmu->need_cleanup = true;
@@ -7998,7 +7959,7 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
unsigned int bytes, struct x86_exception *exception)
{
/* kvm_write_guest_virt_system can pull in tons of pages. */
vcpu->arch.l1tf_flush_l1d = true;
kvm_request_l1tf_flush_l1d();
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
@@ -8842,6 +8803,14 @@ static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
}
static int emulator_get_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 *xcr)
{
if (index != XCR_XFEATURE_ENABLED_MASK)
return 1;
*xcr = emul_to_vcpu(ctxt)->arch.xcr0;
return 0;
}
static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
{
return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
@@ -8914,6 +8883,7 @@ static const struct x86_emulate_ops emulate_ops = {
.is_smm = emulator_is_smm,
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
.get_xcr = emulator_get_xcr,
.set_xcr = emulator_set_xcr,
.get_untagged_addr = emulator_get_untagged_addr,
.is_canonical_addr = emulator_is_canonical_addr,
@@ -9109,6 +9079,18 @@ void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa)
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_event_vectoring_exit);
void kvm_prepare_unexpected_reason_exit(struct kvm_vcpu *vcpu, u64 exit_reason)
{
vcpu_unimpl(vcpu, "unexpected exit reason 0x%llx\n", exit_reason);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.data[0] = exit_reason;
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_unexpected_reason_exit);
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
struct kvm *kvm = vcpu->kvm;
@@ -9394,7 +9376,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
return handle_emulation_failure(vcpu, emulation_type);
}
vcpu->arch.l1tf_flush_l1d = true;
kvm_request_l1tf_flush_l1d();
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
kvm_clear_exception_queue(vcpu);
@@ -10031,17 +10013,9 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
return -ENOMEM;
}
user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
if (!user_return_msrs) {
pr_err("failed to allocate percpu kvm_user_return_msrs\n");
r = -ENOMEM;
goto out_free_x86_emulator_cache;
}
kvm_nr_uret_msrs = 0;
r = kvm_mmu_vendor_module_init();
if (r)
goto out_free_percpu;
goto out_free_x86_emulator_cache;
kvm_caps.supported_vm_types = BIT(KVM_X86_DEFAULT_VM);
kvm_caps.supported_mce_cap = MCG_CTL_P | MCG_SER_P;
@@ -10066,6 +10040,8 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
rdmsrq(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
WARN_ON_ONCE(kvm_nr_uret_msrs);
r = ops->hardware_setup();
if (r != 0)
goto out_mmu_exit;
@@ -10138,9 +10114,8 @@ out_unwind_ops:
kvm_x86_ops.enable_virtualization_cpu = NULL;
kvm_x86_call(hardware_unsetup)();
out_mmu_exit:
kvm_destroy_user_return_msrs();
kvm_mmu_vendor_module_exit();
out_free_percpu:
free_percpu(user_return_msrs);
out_free_x86_emulator_cache:
kmem_cache_destroy(x86_emulator_cache);
return r;
@@ -10168,8 +10143,8 @@ void kvm_x86_vendor_exit(void)
cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_call(hardware_unsetup)();
kvm_destroy_user_return_msrs();
kvm_mmu_vendor_module_exit();
free_percpu(user_return_msrs);
kmem_cache_destroy(x86_emulator_cache);
#ifdef CONFIG_KVM_XEN
static_key_deferred_flush(&kvm_xen_enabled);
@@ -11291,6 +11266,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (vcpu->arch.guest_fpu.xfd_err)
wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
kvm_load_xfeatures(vcpu, true);
if (unlikely(vcpu->arch.switch_db_regs &&
!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
set_debugreg(DR7_FIXED_1, 7);
@@ -11319,6 +11296,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
guest_timing_enter_irqoff();
/*
* Swap PKRU with hardware breakpoints disabled to minimize the number
* of flows where non-KVM code can run with guest state loaded.
*/
kvm_load_guest_pkru(vcpu);
for (;;) {
/*
* Assert that vCPU vs. VM APICv state is consistent. An APICv
@@ -11347,6 +11330,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
++vcpu->stat.exits;
}
kvm_load_host_pkru(vcpu);
/*
* Do this here before restoring debug registers on the host. And
* since we do this before handling the vmexit, a DR access vmexit
@@ -11377,6 +11362,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
kvm_load_xfeatures(vcpu, false);
/*
* Sync xfd before calling handle_exit_irqoff() which may
* rely on the fact that guest_fpu::xfd is up-to-date (e.g.
@@ -12734,8 +12721,6 @@ fail_mmu_destroy:
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
if (mutex_lock_killable(&vcpu->mutex))
return;
vcpu_load(vcpu);
@@ -12746,10 +12731,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu->arch.msr_kvm_poll_control = 1;
mutex_unlock(&vcpu->mutex);
if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
KVMCLOCK_SYNC_PERIOD);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -13088,7 +13069,21 @@ int kvm_arch_enable_virtualization_cpu(void)
void kvm_arch_disable_virtualization_cpu(void)
{
kvm_x86_call(disable_virtualization_cpu)();
drop_user_return_notifiers();
/*
* Leave the user-return notifiers as-is when disabling virtualization
* for reboot, i.e. when disabling via IPI function call, and instead
* pin kvm.ko (if it's a module) to defend against use-after-free (in
* the *very* unlikely scenario module unload is racing with reboot).
* On a forced reboot, tasks aren't frozen before shutdown, and so KVM
* could be actively modifying user-return MSR state when the IPI to
* disable virtualization arrives. Handle the extreme edge case here
* instead of trying to account for it in the normal flows.
*/
if (in_task() || WARN_ON_ONCE(!kvm_rebooting))
drop_user_return_notifiers();
else
__module_get(THIS_MODULE);
}
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
@@ -13160,9 +13155,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.hv_root_tdp = INVALID_PAGE;
#endif
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
kvm_xen_init_vm(kvm);
@@ -13269,9 +13261,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
* is unsafe, i.e. will lead to use-after-free. The PIT also needs to
* be stopped before IRQ routing is freed.
*/
cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
#ifdef CONFIG_KVM_IOAPIC
kvm_free_pit(kvm);
#endif
@@ -13888,7 +13877,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
if ((work->wakeup_all || work->notpresent_injected) &&
kvm_pv_async_pf_enabled(vcpu) &&
!apf_put_user_ready(vcpu, work->arch.token)) {
vcpu->arch.apf.pageready_pending = true;
WRITE_ONCE(vcpu->arch.apf.pageready_pending, true);
kvm_apic_set_irq(vcpu, &irq, NULL);
}
@@ -13899,7 +13888,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{
kvm_make_request(KVM_REQ_APF_READY, vcpu);
if (!vcpu->arch.apf.pageready_pending)
/* Pairs with smp_store_mb() in kvm_set_msr_common(). */
smp_mb__after_atomic();
if (!READ_ONCE(vcpu->arch.apf.pageready_pending))
kvm_vcpu_kick(vcpu);
}
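Read together with the smp_store_mb() on the MSR-write side shown earlier, the pairing works out to the usual store-buffering pattern; an illustrative sketch (not code from the file):

	/*
	 *   vCPU task (MSR write)                   async #PF completion
	 *   ---------------------                   --------------------
	 *   smp_store_mb(pageready_pending,         kvm_make_request(KVM_REQ_APF_READY, vcpu);
	 *                false);                     smp_mb__after_atomic();
	 *   kvm_check_async_pf_completion(vcpu);     if (!READ_ONCE(pageready_pending))
	 *                                                    kvm_vcpu_kick(vcpu);
	 *
	 * Either the vCPU task observes the new KVM_REQ_APF_READY request, or
	 * the completion side observes pageready_pending == false and kicks
	 * the vCPU; the barriers guarantee at least one side sees the other's
	 * update, so a "page ready" event cannot be lost.
	 */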

View File

@@ -420,6 +420,20 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
return !(kvm->arch.disabled_quirks & quirk);
}
static __always_inline void kvm_request_l1tf_flush_l1d(void)
{
#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
/*
* Use a raw write to set the per-CPU flag, as KVM will ensure a flush
 * even if preemption is currently enabled. If the current vCPU task
* is migrated to a different CPU (or userspace runs the vCPU on a
* different task) before the next VM-Entry, then kvm_arch_vcpu_load()
* will request a flush on the new CPU.
*/
raw_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
#endif
}
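On the consumer side, the per-CPU bit is presumably read and cleared in the VMX L1D flush path via the existing hardirq.h accessors; a rough sketch under that assumption (the exact placement inside vmx_l1d_flush() is not shown in this diff):

	/* Sketch: consuming the requested flush before VM-Enter. */
	if (static_branch_likely(&vmx_l1d_flush_cond)) {
		bool flush_l1d = kvm_get_cpu_l1tf_flush_l1d();

		/* Clear the per-CPU bit; interrupt handlers can set it again. */
		kvm_clear_cpu_l1tf_flush_l1d();

		if (!flush_l1d)
			return;
	}
	/* ... perform the actual L1D flush ... */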
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
u64 get_kvmclock_ns(struct kvm *kvm);
@@ -622,8 +636,6 @@ static inline void kvm_machine_check(void)
#endif
}
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);