KVM: selftests: Extend vmx_close_while_nested_test to cover SVM
Add SVM L1 code to run the nested guest, and allow the test to run with
SVM as well as VMX.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251021074736.1324328-4-yosry.ahmed@linux.dev
[sean: rename to "nested_close_kvm_test" to provide nested_* sorting]
Signed-off-by: Sean Christopherson <seanjc@google.com>
committed by Sean Christopherson
parent 9e4ce7a89e
commit 0a9eb2afa1
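For context before the diff: the hunks below show only a fragment of the L2 guest body that both the VMX and SVM L1 paths run. It is unchanged by this patch; the following is a sketch reconstructed around the visible asm constraint line, where the "inb" mnemonic is an assumption not shown in this diff:

	static void l2_guest_code(void)
	{
		/* Exit to L0 via port I/O; L0 intercepts the access. */
		asm volatile("inb %%dx, %%al"
			     : : [port] "d" (PORT_L0_EXIT) : "rax");
	}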
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
@@ -88,6 +88,7 @@ TEST_GEN_PROGS_x86 += x86/kvm_pv_test
 TEST_GEN_PROGS_x86 += x86/kvm_buslock_test
 TEST_GEN_PROGS_x86 += x86/monitor_mwait_test
 TEST_GEN_PROGS_x86 += x86/msrs_test
+TEST_GEN_PROGS_x86 += x86/nested_close_kvm_test
 TEST_GEN_PROGS_x86 += x86/nested_emulation_test
 TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
 TEST_GEN_PROGS_x86 += x86/platform_info_test
@@ -111,7 +112,6 @@ TEST_GEN_PROGS_x86 += x86/ucna_injection_test
 TEST_GEN_PROGS_x86 += x86/userspace_io_test
 TEST_GEN_PROGS_x86 += x86/userspace_msr_exit_test
 TEST_GEN_PROGS_x86 += x86/vmx_apic_access_test
-TEST_GEN_PROGS_x86 += x86/vmx_close_while_nested_test
 TEST_GEN_PROGS_x86 += x86/vmx_dirty_log_test
 TEST_GEN_PROGS_x86 += x86/vmx_exception_with_invalid_guest_state
 TEST_GEN_PROGS_x86 += x86/vmx_msrs_test

diff --git a/tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
rename from tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c
rename to tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * vmx_close_while_nested
- *
  * Copyright (C) 2019, Red Hat, Inc.
  *
  * Verify that nothing bad happens if a KVM user exits with open
@@ -12,6 +10,7 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include "vmx.h"
+#include "svm_util.h"
 
 #include <string.h>
 #include <sys/ioctl.h>
@@ -22,6 +21,8 @@ enum {
 	PORT_L0_EXIT = 0x2000,
 };
 
+#define L2_GUEST_STACK_SIZE 64
+
 static void l2_guest_code(void)
 {
 	/* Exit to L0 */
@@ -29,9 +30,8 @@ static void l2_guest_code(void)
 	    : : [port] "d" (PORT_L0_EXIT) : "rax");
 }
 
-static void l1_guest_code(struct vmx_pages *vmx_pages)
+static void l1_vmx_code(struct vmx_pages *vmx_pages)
 {
-#define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
 	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
@@ -45,19 +45,43 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_ASSERT(0);
 }
 
+static void l1_svm_code(struct svm_test_data *svm)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+	/* Prepare the VMCB for L2 execution. */
+	generic_svm_setup(svm, l2_guest_code,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	run_guest(svm->vmcb, svm->vmcb_gpa);
+	GUEST_ASSERT(0);
+}
+
+static void l1_guest_code(void *data)
+{
+	if (this_cpu_has(X86_FEATURE_VMX))
+		l1_vmx_code(data);
+	else
+		l1_svm_code(data);
+}
+
 int main(int argc, char *argv[])
 {
-	vm_vaddr_t vmx_pages_gva;
+	vm_vaddr_t guest_gva;
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+		     kvm_cpu_has(X86_FEATURE_SVM));
 
 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
 
-	/* Allocate VMX pages and shared descriptors (vmx_pages). */
-	vcpu_alloc_vmx(vm, &vmx_pages_gva);
-	vcpu_args_set(vcpu, 1, vmx_pages_gva);
+	if (kvm_cpu_has(X86_FEATURE_VMX))
+		vcpu_alloc_vmx(vm, &guest_gva);
+	else
+		vcpu_alloc_svm(vm, &guest_gva);
+
+	vcpu_args_set(vcpu, 1, guest_gva);
 
 	for (;;) {
 		volatile struct kvm_run *run = vcpu->run;
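The last hunk ends at the top of the userspace run loop, which the patch leaves otherwise untouched. For context, a sketch of the remainder of main(), assuming the pre-patch test body and the selftest helpers vcpu_run() and TEST_ASSERT_KVM_EXIT_REASON(); the explicit return is illustrative:

		/* Run L1 until L2 triggers the PORT_L0_EXIT I/O intercept. */
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		if (run->io.port == PORT_L0_EXIT)
			break;
	}

	/*
	 * Return with the VM and vCPU file descriptors still open while L1
	 * has a live nested guest; the test passes if KVM tears down the
	 * nested state cleanly on process exit.
	 */
	return 0;
}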