Mirror of https://github.com/torvalds/linux.git (synced 2025-12-07 20:06:24 +00:00)
Merge branch kvm-arm64/misc into kvmarm/next
* kvm-arm64/misc:
  : Miscellaneous updates
  :
  : - Drop useless check against vgic state in ICC_CTLR_EL1.SEIS read
  :   emulation
  :
  : - Fix trap configuration for pKVM
  :
  : - Close the door on initialization bugs surrounding userspace irqchip
  :   static key by removing it
  KVM: selftests: Don't bother deleting memslots in KVM when freeing VMs
  KVM: arm64: Get rid of userspace_irqchip_in_use
  KVM: arm64: Initialize trap register values in hyp in pKVM
  KVM: arm64: Initialize the hypervisor's VM state at EL2
  KVM: arm64: Refactor kvm_vcpu_enable_ptrauth() for hyp use
  KVM: arm64: Move pkvm_vcpu_init_traps() to init_pkvm_hyp_vcpu()
  KVM: arm64: Don't map 'kvm_vgic_global_state' at EL2 with pKVM
  KVM: arm64: Just advertise SEIS as 0 when emulating ICC_CTLR_EL1

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
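As background for the static-key removal: a static key is a runtime-patched branch that costs nearly nothing on the fast path until someone increments it. A minimal standalone sketch of the pattern this series retires (hypothetical names, not the kernel's actual code):

#include <linux/jump_label.h>

/* Hypothetical example key; the series deletes userspace_irqchip_in_use. */
DEFINE_STATIC_KEY_FALSE(example_key);

static void example_register_slow_user(void)
{
        static_branch_inc(&example_key);        /* patches branch sites live */
}

static bool example_fast_path(void)
{
        /* Compiles to a no-op jump while the key is disabled. */
        return static_branch_unlikely(&example_key);
}

The series replaces this global toggle with per-VM irqchip_in_kernel() checks, which cannot be observed before the key is flipped and so close the initialization race the merge message mentions.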
arch/arm64/include/asm/kvm_asm.h
@@ -76,7 +76,6 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
-	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
 	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
arch/arm64/include/asm/kvm_emulate.h
@@ -693,4 +693,8 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
 	return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
 }
 
+static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+{
+	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
+}
 #endif /* __ARM64_KVM_EMULATE_H__ */
arch/arm64/include/asm/kvm_host.h
@@ -74,8 +74,6 @@ enum kvm_mode kvm_get_mode(void);
 static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
 #endif
 
-DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-
 extern unsigned int __ro_after_init kvm_sve_max_vl;
 extern unsigned int __ro_after_init kvm_host_sve_max_vl;
 int __init kvm_arm_init_sve(void);
arch/arm64/kvm/arch_timer.c
@@ -206,8 +206,7 @@ void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
 
 static inline bool userspace_irqchip(struct kvm *kvm)
 {
-	return static_branch_unlikely(&userspace_irqchip_in_use) &&
-		unlikely(!irqchip_in_kernel(kvm));
+	return unlikely(!irqchip_in_kernel(kvm));
 }
 
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
arch/arm64/kvm/arm.c
@@ -69,7 +69,6 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 static bool vgic_present, kvm_arm_initialised;
 
 static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
-DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
 bool is_kvm_arm_initialised(void)
 {
@@ -503,9 +502,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		static_branch_dec(&userspace_irqchip_in_use);
-
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
@@ -848,22 +844,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 		return ret;
 	}
 
-	if (!irqchip_in_kernel(kvm)) {
-		/*
-		 * Tell the rest of the code that there are userspace irqchip
-		 * VMs in the wild.
-		 */
-		static_branch_inc(&userspace_irqchip_in_use);
-	}
-
-	/*
-	 * Initialize traps for protected VMs.
-	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
-	 * the code is in place for first run initialization at EL2.
-	 */
-	if (kvm_vm_is_protected(kvm))
-		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
-
 	mutex_lock(&kvm->arch.config_lock);
 	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
 	mutex_unlock(&kvm->arch.config_lock);
@@ -1077,7 +1057,7 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
 		 * state gets updated in kvm_timer_update_run and
 		 * kvm_pmu_update_run below).
 		 */
-		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
+		if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
 			if (kvm_timer_should_notify_user(vcpu) ||
 			    kvm_pmu_should_notify_user(vcpu)) {
 				*ret = -EINTR;
@@ -1199,7 +1179,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			isb(); /* Ensure work in x_flush_hwstate is committed */
 			kvm_pmu_sync_hwstate(vcpu);
-			if (static_branch_unlikely(&userspace_irqchip_in_use))
+			if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
 				kvm_timer_sync_user(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
 			local_irq_enable();
@@ -1245,7 +1225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 * we don't want vtimer interrupts to race with syncing the
 		 * timer virtual interrupt state.
 		 */
-		if (static_branch_unlikely(&userspace_irqchip_in_use))
+		if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
 			kvm_timer_sync_user(vcpu);
 
 		kvm_arch_vcpu_ctxsync_fp(vcpu);
arch/arm64/kvm/hyp/include/nvhe/trap_handler.h
@@ -15,6 +15,4 @@
 #define DECLARE_REG(type, name, ctxt, reg) \
 	type name = (type)cpu_reg(ctxt, (reg))
 
-void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu);
-
 #endif /* __ARM64_KVM_NVHE_TRAP_HANDLER_H__ */
arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -105,8 +105,10 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 
 	hyp_vcpu->vcpu.arch.hw_mmu	= host_vcpu->arch.hw_mmu;
 
-	hyp_vcpu->vcpu.arch.hcr_el2	= host_vcpu->arch.hcr_el2;
 	hyp_vcpu->vcpu.arch.mdcr_el2	= host_vcpu->arch.mdcr_el2;
+	hyp_vcpu->vcpu.arch.hcr_el2	&= ~(HCR_TWI | HCR_TWE);
+	hyp_vcpu->vcpu.arch.hcr_el2	|= READ_ONCE(host_vcpu->arch.hcr_el2) &
+					   (HCR_TWI | HCR_TWE);
 
 	hyp_vcpu->vcpu.arch.iflags	= host_vcpu->arch.iflags;
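The flush path above now preserves the hyp vCPU's own HCR_EL2 and imports only the host's WFI/WFE trap bits. A standalone sketch of that clear-then-copy idiom (bit positions follow the architectural HCR_EL2 layout; the helper name is hypothetical):

#include <stdint.h>

#define HCR_TWI	(UINT64_C(1) << 13)	/* trap guest WFI */
#define HCR_TWE	(UINT64_C(1) << 14)	/* trap guest WFE */

/* Replace only the masked bits of dst with the corresponding bits of src. */
static uint64_t copy_masked_bits(uint64_t dst, uint64_t src, uint64_t mask)
{
	dst &= ~mask;		/* drop the stale trap bits */
	dst |= src & mask;	/* import the host's current settings */
	return dst;
}

e.g. copy_masked_bits(hyp_hcr, host_hcr, HCR_TWI | HCR_TWE) mirrors the two added lines in the hunk above.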
@@ -349,13 +351,6 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
 }
 
-static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
-{
-	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
-
-	__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
-}
-
 static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
@@ -411,7 +406,6 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
 	HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
 	HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
-	HANDLE_FUNC(__pkvm_vcpu_init_traps),
 	HANDLE_FUNC(__pkvm_init_vm),
 	HANDLE_FUNC(__pkvm_init_vcpu),
 	HANDLE_FUNC(__pkvm_teardown_vm),
arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -6,6 +6,9 @@
 
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
 
+#include <asm/kvm_emulate.h>
+
 #include <nvhe/fixed_config.h>
 #include <nvhe/mem_protect.h>
 #include <nvhe/memory.h>
@@ -201,11 +204,46 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+
+	if (has_hvhe())
+		vcpu->arch.hcr_el2 |= HCR_E2H;
+
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
+		/* route synchronous external abort exceptions to EL2 */
+		vcpu->arch.hcr_el2 |= HCR_TEA;
+		/* trap error record accesses */
+		vcpu->arch.hcr_el2 |= HCR_TERR;
+	}
+
+	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+		vcpu->arch.hcr_el2 |= HCR_FWB;
+
+	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+		vcpu->arch.hcr_el2 |= HCR_TID4;
+	else
+		vcpu->arch.hcr_el2 |= HCR_TID2;
+
+	if (vcpu_has_ptrauth(vcpu))
+		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
 /*
  * Initialize trap register values in protected mode.
  */
-void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
+static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
+	vcpu->arch.mdcr_el2 = 0;
+
+	pkvm_vcpu_reset_hcr(vcpu);
+
+	if (!vcpu_is_protected(vcpu))
+		return;
 
 	pvm_init_trap_regs(vcpu);
 	pvm_init_traps_aa64pfr0(vcpu);
 	pvm_init_traps_aa64pfr1(vcpu);
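pkvm_vcpu_init_traps() now resets HCR_EL2 for every hyp vCPU and applies the extra pVM trap setup only when the vCPU is protected. The reset itself is plain conditional accumulation of trap bits into a base value; a hedged standalone illustration (the EX_* constants are stand-ins for the real HCR_* flags, not kernel definitions):

#include <stdbool.h>
#include <stdint.h>

#define EX_TEA	(UINT64_C(1) << 0)	/* stand-in for HCR_TEA */
#define EX_TERR	(UINT64_C(1) << 1)	/* stand-in for HCR_TERR */
#define EX_FWB	(UINT64_C(1) << 2)	/* stand-in for HCR_FWB */

/* Accumulate trap bits on top of a base configuration. */
static uint64_t build_trap_bits(uint64_t base, bool has_ras, bool has_fwb)
{
	uint64_t hcr = base;

	if (has_ras)
		hcr |= EX_TEA | EX_TERR;	/* route/trap external aborts */
	if (has_fwb)
		hcr |= EX_FWB;			/* stage-2 forced write-back */

	return hcr;
}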
@@ -289,6 +327,65 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	hyp_spin_unlock(&vm_table_lock);
 }
 
+static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
+{
+	struct kvm *kvm = &hyp_vm->kvm;
+	DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
+
+	/* No restrictions for non-protected VMs. */
+	if (!kvm_vm_is_protected(kvm)) {
+		bitmap_copy(kvm->arch.vcpu_features,
+			    host_kvm->arch.vcpu_features,
+			    KVM_VCPU_MAX_FEATURES);
+		return;
+	}
+
+	bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);
+
+	/*
+	 * For protected VMs, always allow:
+	 * - CPU starting in poweroff state
+	 * - PSCI v0.2
+	 */
+	set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features);
+	set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);
+
+	/*
+	 * Check if remaining features are allowed:
+	 * - Performance Monitoring
+	 * - Scalable Vectors
+	 * - Pointer Authentication
+	 */
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW))
+		set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);
+
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
+		set_bit(KVM_ARM_VCPU_SVE, allowed_features);
+
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) &&
+	    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED))
+		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);
+
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
+	    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW))
+		set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);
+
+	bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
+		   allowed_features, KVM_VCPU_MAX_FEATURES);
+}
+
+static void pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+
+	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
+	    vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) {
+		kvm_vcpu_enable_ptrauth(vcpu);
+	} else {
+		vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_PTRAUTH);
+	}
+}
+
 static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
 {
 	if (host_vcpu)
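pkvm_init_features_from_host() above is a whitelist filter: the host's requested vCPU features are intersected with what protected mode permits, so a protected guest can never be handed a feature the hypervisor did not opt into. A minimal userspace analogue of that bitmap_and() intersection (hypothetical, standalone):

#include <stdio.h>

enum { FEAT_PMU = 1 << 0, FEAT_SVE = 1 << 1, FEAT_PTRAUTH = 1 << 2 };

int main(void)
{
	unsigned requested = FEAT_PMU | FEAT_PTRAUTH;	/* host asked for these */
	unsigned allowed   = FEAT_SVE | FEAT_PTRAUTH;	/* the allow-list */

	/* Analogous to bitmap_and(vcpu_features, requested, allowed, ...) */
	printf("granted: %#x\n", requested & allowed);	/* -> FEAT_PTRAUTH */
	return 0;
}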
@@ -310,6 +407,18 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
 	hyp_vm->host_kvm = host_kvm;
 	hyp_vm->kvm.created_vcpus = nr_vcpus;
 	hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
+	hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
+	pkvm_init_features_from_host(hyp_vm, host_kvm);
 }
 
+static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
+{
+	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+
+	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
+		vcpu_clear_flag(vcpu, GUEST_HAS_SVE);
+		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+	}
+}
+
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -335,6 +444,11 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 
 	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
 	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
+	hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+
+	pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
+	pkvm_vcpu_init_ptrauth(hyp_vcpu);
+	pkvm_vcpu_init_traps(&hyp_vcpu->vcpu);
 done:
 	if (ret)
 		unpin_host_vcpu(host_vcpu);
arch/arm64/kvm/hyp/nvhe/setup.c
@@ -95,7 +95,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 {
 	void *start, *end, *virt = hyp_phys_to_virt(phys);
 	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
-	enum kvm_pgtable_prot prot;
 	int ret, i;
 
 	/* Recreate the hyp page-table using the early page allocator */
@@ -148,22 +147,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 	}
 
 	pkvm_create_host_sve_mappings();
 
-	/*
-	 * Map the host sections RO in the hypervisor, but transfer the
-	 * ownership from the host to the hypervisor itself to make sure they
-	 * can't be donated or shared with another entity.
-	 *
-	 * The ownership transition requires matching changes in the host
-	 * stage-2. This will be done later (see finalize_host_mappings()) once
-	 * the hyp_vmemmap is addressable.
-	 */
-	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
-	ret = pkvm_create_mappings(&kvm_vgic_global_state,
-				   &kvm_vgic_global_state + 1, prot);
-	if (ret)
-		return ret;
-
 	return 0;
 }
arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -1012,9 +1012,6 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
 	/* IDbits */
 	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
-	/* SEIS */
-	if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
-		val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
 	/* A3V */
 	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
 	/* EOImode */
arch/arm64/kvm/reset.c
@@ -167,11 +167,6 @@ static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
 		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
 }
 
-static void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
-{
-	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
-}
-
 /**
  * kvm_reset_vcpu - sets core registers and sys_regs to reset value
  * @vcpu: The VCPU pointer
tools/testing/selftests/kvm/lib/kvm_util.c
@@ -720,9 +720,6 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
 	rb_erase(&region->hva_node, &vm->regions.hva_tree);
 	hash_del(&region->slot_node);
 
-	region->region.memory_size = 0;
-	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
-
 	sparsebit_free(&region->unused_phy_pages);
 	sparsebit_free(&region->protected_phy_pages);
 	ret = munmap(region->mmap_start, region->mmap_size);
@@ -1197,7 +1194,12 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
  */
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
 {
-	__vm_mem_region_delete(vm, memslot2region(vm, slot));
+	struct userspace_mem_region *region = memslot2region(vm, slot);
+
+	region->region.memory_size = 0;
+	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
+
+	__vm_mem_region_delete(vm, region);
 }
 
 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
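Taken together, the two selftests hunks split memslot deletion from VM teardown: an explicit vm_mem_region_delete() still tells KVM to drop the slot (memory_size = 0 via KVM_SET_USER_MEMORY_REGION2) before freeing userspace state, while freeing a whole VM now calls only __vm_mem_region_delete() and relies on the kernel to reap memslots when the VM file descriptor is closed. A sketch of the resulting explicit-delete flow, consolidated from the hunks above (the wrapper name is hypothetical):

/* Explicit delete: remove the slot from KVM, then free userspace tracking. */
static void delete_slot_explicitly(struct kvm_vm *vm, uint32_t slot)
{
	struct userspace_mem_region *region = memslot2region(vm, slot);

	region->region.memory_size = 0;				/* ask KVM to drop it */
	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
	__vm_mem_region_delete(vm, region);			/* userspace-only cleanup */
}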