Most implementations cache the combined result of two-stage translation,
but some, like Andes cores, use split TLBs that store VS-stage and
G-stage entries separately. On such systems, when a VCPU migrates to
another CPU, an additional HFENCE.VVMA is required to avoid using stale
VS-stage entries, which could otherwise cause guest faults.

Introduce a static key to identify CPUs with split two-stage TLBs. When
enabled, KVM issues an extra HFENCE.VVMA on VCPU migration to prevent
stale VS-stage mappings.

Signed-off-by: Hui Min Mina Chou <minachou@andestech.com>
Signed-off-by: Ben Zong-You Xie <ben717@andestech.com>
Reviewed-by: Radim Krčmář <rkrcmar@ventanamicro.com>
Reviewed-by: Nutty Liu <nutty.liu@hotmail.com>
Link: https://lore.kernel.org/r/20251117084555.157642-1-minachou@andestech.com
Signed-off-by: Anup Patel <anup@brainfault.org>
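To make the migration flow concrete, here is a minimal sketch, not the
actual patch: the static key name kvm_riscv_split_tlb_sketch and the
wrapper function are hypothetical, while the two flush helpers are the
ones declared in the header below.

	#include <linux/jump_label.h>
	#include <asm/kvm_tlb.h>	/* kvm_riscv_local_hfence_* declarations */

	/* Hypothetical static key: a boot-time probe would enable it on
	 * cores (such as Andes) whose TLBs cache VS-stage and G-stage
	 * entries separately instead of the combined two-stage result. */
	static DEFINE_STATIC_KEY_FALSE(kvm_riscv_split_tlb_sketch);

	/* Hypothetical helper run when a VCPU using 'vmid' lands on a new CPU. */
	static void kvm_riscv_vcpu_migrated_sketch(unsigned long vmid)
	{
		/* Flush this CPU's G-stage translations for the VMID. */
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);

		/* On split-TLB cores the G-stage flush leaves cached VS-stage
		 * entries intact, so an extra HFENCE.VVMA is needed to avoid
		 * stale guest-virtual translations and spurious guest faults. */
		if (static_branch_unlikely(&kvm_riscv_split_tlb_sketch))
			kvm_riscv_local_hfence_vvma_all(vmid);
	}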
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#ifndef __RISCV_KVM_TLB_H_
#define __RISCV_KVM_TLB_H_

#include <linux/kvm_types.h>

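/*
 * Deferred flush request types. GVMA variants flush G-stage
 * (guest-physical) translations selected by VMID, optionally limited
 * to a GPA range; VVMA variants flush VS-stage (guest-virtual)
 * translations, optionally limited by ASID and/or a GVA range.
 */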
enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_GVMA_VMID_ALL,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
	KVM_RISCV_HFENCE_VVMA_ALL
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long vmid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

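/*
 * Illustrative example (not part of the upstream header): a request
 * for a G-stage flush of a single 2 MiB GPA granule of VMID 3 could
 * be encoded as
 *
 *	struct kvm_riscv_hfence req = {
 *		.type  = KVM_RISCV_HFENCE_GVMA_VMID_GPA,
 *		.vmid  = 3,
 *		.order = 21,		// log2(2 MiB) flush granule
 *		.addr  = gpa,		// guest-physical base address
 *		.size  = SZ_2M,		// bytes to flush
 *	};
 */

/* Per-VCPU bound on queued struct kvm_riscv_hfence requests. */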
#define KVM_RISCV_VCPU_MAX_HFENCE 64

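/* Smallest flush granule order for ranged flushes: log2(4 KiB) = 12. */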
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12

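/*
 * Local flushes act on the calling CPU only; the ranged variants walk
 * the given range in BIT(order)-sized strides. For example, flushing
 * one 4 KiB guest-physical page of VMID 3 on this CPU (illustrative):
 *
 *	kvm_riscv_local_hfence_gvma_vmid_gpa(3, gpa, PAGE_SIZE,
 *					     KVM_RISCV_GSTAGE_TLB_MIN_ORDER);
 */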
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);

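/*
 * Handlers run on a target VCPU to service flush work requested by
 * other VCPUs or by the VMM, typically raised via KVM vcpu requests.
 */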
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

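/*
 * VM-wide flush requests: hbase/hmask select the set of target VCPUs
 * (an SBI-style hart mask); the matching flush is queued for, and then
 * processed by, each selected VCPU.
 */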
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order, unsigned long vmid);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long vmid);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid,
				    unsigned long vmid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid, unsigned long vmid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order, unsigned long vmid);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long vmid);

#endif