KVM: arm64: GICv3: Add SPI tracking to handle asymmetric deactivation

SPIs are especially annoying, as they can be activated on one CPU and
deactivated on another. This means that when an SPI is in flight
anywhere, all CPUs need to have their TDIR trap bit set.

This translates into broadcasting an IPI to all CPUs to make sure
they set their trap bit. The number of in-flight SPIs is kept in
an atomic variable so that CPUs can turn the trap bit off as soon
as possible.
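
As a rough illustration of the scheme described above (not the kernel code
itself), here is a minimal user-space sketch using C11 atomics: a VM-wide
counter of in-flight SPIs drives each CPU's decision to trap DIR writes.
The helper name need_tdir_trap() and the active_outside_lrs flag are
invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_spis;	/* VM-wide count of in-flight SPIs */

/* Per-CPU: should the DIR trap (TDIR) be set when entering the guest? */
static bool need_tdir_trap(bool active_outside_lrs)
{
	/*
	 * Trap if this CPU tracks active IRQs outside its LRs, or if any
	 * SPI is in flight anywhere, since that SPI may be deactivated on
	 * a different CPU than the one that activated it.
	 */
	return active_outside_lrs || atomic_load(&active_spis) > 0;
}

int main(void)
{
	printf("trap? %d\n", need_tdir_trap(false));	/* 0: no SPI in flight */
	atomic_fetch_add(&active_spis, 1);		/* an SPI is injected */
	printf("trap? %d\n", need_tdir_trap(false));	/* 1: all CPUs must trap */
	return 0;
}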

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-32-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
Author: Marc Zyngier
Date: 2025-11-20 17:25:21 +00:00
Committed by: Oliver Upton
parent 70fd60bded
commit 1c3b3cadcd
4 changed files with 42 additions and 8 deletions


@@ -188,6 +188,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
int i;
dist->active_spis = (atomic_t)ATOMIC_INIT(0);
dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
if (!dist->spis)
return -ENOMEM;


@@ -47,10 +47,17 @@ void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu,
ICH_HCR_EL2_VGrp1DIE : ICH_HCR_EL2_VGrp1EIE;
/*
* Dealing with EOImode=1 is a massive source of headache. Not
* only do we need to track that we have active interrupts
* outside of the LRs and force DIR to be trapped, we also
* need to deal with SPIs that can be deactivated on another
* CPU.
*
* Note that we set the trap irrespective of EOIMode, as that
* can change behind our back without any warning...
*/
if (irqs_active_outside_lrs(als))
if (irqs_active_outside_lrs(als) ||
atomic_read(&vcpu->kvm->arch.vgic.active_spis))
cpuif->vgic_hcr |= ICH_HCR_EL2_TDIR;
}
@@ -78,11 +85,6 @@ static void vgic_v3_fold_lr(struct kvm_vcpu *vcpu, u64 val)
if (!irq) /* An LPI could have been unmapped. */
return;
/* Notify fds when the guest EOI'ed a level-triggered IRQ */
if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
kvm_notify_acked_irq(vcpu->kvm, 0,
intid - VGIC_NR_PRIVATE_IRQS);
scoped_guard(raw_spinlock, &irq->irq_lock) {
/* Always preserve the active bit for !LPIs, note deactivation */
if (irq->intid >= VGIC_MIN_LPI)
@@ -117,6 +119,13 @@ static void vgic_v3_fold_lr(struct kvm_vcpu *vcpu, u64 val)
irq->on_lr = false;
}
/* Notify fds when the guest EOI'ed a level-triggered SPI, and drop the refcount */
if (deactivated && lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid)) {
kvm_notify_acked_irq(vcpu->kvm, 0,
intid - VGIC_NR_PRIVATE_IRQS);
atomic_dec_if_positive(&vcpu->kvm->arch.vgic.active_spis);
}
vgic_put_irq(vcpu->kvm, irq);
}
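
The deactivation path above drops the counter with atomic_dec_if_positive()
so that a spurious extra deactivation cannot push it below zero. Below is a
small C11 model of that "decrement, but never below zero" step;
dec_if_positive() here is a local illustration of the kernel primitive, not
the real helper.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_spis;

/*
 * Decrement *v unless that would make it negative. Returns the new value
 * on success, or a negative value if nothing was decremented.
 */
static int dec_if_positive(atomic_int *v)
{
	int c = atomic_load(v);
	int dec;

	do {
		dec = c - 1;
		if (dec < 0)
			break;	/* already zero: leave it alone */
	} while (!atomic_compare_exchange_weak(v, &c, dec));

	return dec;
}

int main(void)
{
	atomic_store(&active_spis, 1);
	printf("%d\n", dec_if_positive(&active_spis));	/* 0: decremented */
	printf("%d\n", dec_if_positive(&active_spis));	/* -1: left at zero */
	return 0;
}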


@@ -367,6 +367,17 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne
return false;
}
static bool vgic_model_needs_bcst_kick(struct kvm *kvm)
{
/*
* A GICv3 (or GICv3-like) system exposing a GICv3 to the
* guest needs a broadcast kick to set TDIR globally, even if
* the bit doesn't really exist (we still need to check for
* the shadow bit in the DIR emulation fast-path).
*/
return (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
}
/*
* Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
* Do the queuing if necessary, taking the right locks in the right order.
@@ -379,6 +390,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
unsigned long flags) __releases(&irq->irq_lock)
{
struct kvm_vcpu *vcpu;
bool bcast;
lockdep_assert_held(&irq->irq_lock);
@@ -453,11 +465,20 @@ retry:
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
irq->vcpu = vcpu;
/* A new SPI may result in deactivation trapping on all vcpus */
bcast = (vgic_model_needs_bcst_kick(vcpu->kvm) &&
vgic_valid_spi(vcpu->kvm, irq->intid) &&
atomic_fetch_inc(&vcpu->kvm->arch.vgic.active_spis) == 0);
raw_spin_unlock(&irq->irq_lock);
raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
if (!bcast) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
} else {
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_IRQ_PENDING);
}
return true;
}
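
The queueing side above only needs the expensive VM-wide kick on the
transition from zero to one in-flight SPIs: atomic_fetch_inc() returns the
old value, so the 0 -> 1 transition is detectable without extra locking.
A rough user-space sketch of that decision follows; kick_one() and
kick_all() are placeholders standing in for the KVM request/kick machinery,
not real functions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_spis;

static void kick_one(int vcpu)	{ printf("kick vcpu %d\n", vcpu); }
static void kick_all(void)	{ printf("kick all vcpus\n"); }

static void queue_spi(int target_vcpu)
{
	/* fetch-and-increment: a return value of 0 means this is the first SPI */
	bool bcast = atomic_fetch_add(&active_spis, 1) == 0;

	if (bcast)
		kick_all();		/* every CPU must now set its DIR trap */
	else
		kick_one(target_vcpu);	/* the trap is already set VM-wide */
}

int main(void)
{
	queue_spi(0);	/* first SPI: broadcast */
	queue_spi(1);	/* second SPI: targeted kick only */
	return 0;
}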


@@ -263,6 +263,9 @@ struct vgic_dist {
/* The GIC maintenance IRQ for nested hypervisors. */
u32 mi_intid;
/* Track the number of in-flight active SPIs */
atomic_t active_spis;
/* base addresses in guest physical address space: */
gpa_t vgic_dist_base; /* distributor */
union {