KVM: arm64: GICv3: Extract LR folding primitive

As we are going to need to handle deactivation for interrupts that
are not in the LRs, split vgic_v3_fold_lr_state() into a helper that
deals with a single interrupt and a function that loops over the
used LRs.

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-14-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
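
For context, the first thing the new helper does is recover the vINTID
from the raw LR value: a GICv3 guest gets a 32-bit virtual ID, while a
GICv2 guest gets a 10-bit virtual ID plus, for SGIs, a source CPUID in
the adjacent field. A minimal standalone sketch of that decoding, with
the mask values copied from the kernel's GIC headers (widened to ULL
here) and everything else purely illustrative:

	#include <stdint.h>
	#include <stdio.h>

	/* Mask values as in include/linux/irqchip/arm-gic-v3.h and arm-gic.h */
	#define ICH_LR_VIRTUAL_ID_MASK		((1ULL << 32) - 1)
	#define GICH_LR_VIRTUALID		(0x3ffULL << 0)
	#define GICH_LR_PHYSID_CPUID_SHIFT	10
	#define GICH_LR_PHYSID_CPUID		(7ULL << GICH_LR_PHYSID_CPUID_SHIFT)
	#define ICH_LR_PENDING_BIT		(1ULL << 62)
	#define ICH_LR_ACTIVE_BIT		(1ULL << 63)

	int main(void)
	{
		/* A pending vSGI 3 from source vCPU 2, as a GICv2 guest sees it */
		uint64_t lr = ICH_LR_PENDING_BIT |
			      (2ULL << GICH_LR_PHYSID_CPUID_SHIFT) | 3;

		printf("intid=%llu cpuid=%llu pending=%d active=%d\n",
		       (unsigned long long)(lr & GICH_LR_VIRTUALID),
		       (unsigned long long)((lr & GICH_LR_PHYSID_CPUID) >>
					    GICH_LR_PHYSID_CPUID_SHIFT),
		       !!(lr & ICH_LR_PENDING_BIT),
		       !!(lr & ICH_LR_ACTIVE_BIT));
		return 0;
	}

This prints "intid=3 cpuid=2 pending=1 active=0", which is the split the
helper performs before looking the interrupt up with vgic_get_vcpu_irq().
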
@@ -33,78 +33,76 @@ static bool lr_signals_eoi_mi(u64 lr_val)
 	       !(lr_val & ICH_LR_HW);
 }
 
-void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
+static void vgic_v3_fold_lr(struct kvm_vcpu *vcpu, u64 val)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
-	u32 model = vcpu->kvm->arch.vgic.vgic_model;
-	int lr;
+	struct vgic_irq *irq;
+	bool is_v2_sgi = false;
+	bool deactivated;
+	u32 intid;
 
-	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+		intid = val & ICH_LR_VIRTUAL_ID_MASK;
+	} else {
+		intid = val & GICH_LR_VIRTUALID;
+		is_v2_sgi = vgic_irq_is_sgi(intid);
+	}
 
-	cpuif->vgic_hcr &= ~ICH_HCR_EL2_UIE;
+	irq = vgic_get_vcpu_irq(vcpu, intid);
+	if (!irq)	/* An LPI could have been unmapped. */
+		return;
 
-	for (lr = 0; lr < cpuif->used_lrs; lr++) {
-		u64 val = cpuif->vgic_lr[lr];
-		u32 intid, cpuid;
-		struct vgic_irq *irq;
-		bool is_v2_sgi = false;
-		bool deactivated;
-
-		cpuid = val & GICH_LR_PHYSID_CPUID;
-		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
-
-		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-			intid = val & ICH_LR_VIRTUAL_ID_MASK;
-		} else {
-			intid = val & GICH_LR_VIRTUALID;
-			is_v2_sgi = vgic_irq_is_sgi(intid);
-		}
-
-		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
-		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
-			kvm_notify_acked_irq(vcpu->kvm, 0,
-					     intid - VGIC_NR_PRIVATE_IRQS);
-
-		irq = vgic_get_vcpu_irq(vcpu, intid);
-		if (!irq)	/* An LPI could have been unmapped. */
-			continue;
-
-		raw_spin_lock(&irq->irq_lock);
+	/* Notify fds when the guest EOI'ed a level-triggered IRQ */
+	if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+		kvm_notify_acked_irq(vcpu->kvm, 0,
+				     intid - VGIC_NR_PRIVATE_IRQS);
 
+	scoped_guard(raw_spinlock, &irq->irq_lock) {
 		/* Always preserve the active bit for !LPIs, note deactivation */
 		if (irq->intid >= VGIC_MIN_LPI)
 			val &= ~ICH_LR_ACTIVE_BIT;
 
 		deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
 		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
 
-		if (irq->active && is_v2_sgi)
-			irq->active_source = cpuid;
-
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
-		    (val & ICH_LR_PENDING_BIT)) {
+		    (val & ICH_LR_PENDING_BIT))
 			irq->pending_latch = true;
 
-			if (is_v2_sgi)
-				irq->source |= (1 << cpuid);
-		}
-
 		/*
 		 * Clear soft pending state when level irqs have been acked.
 		 */
 		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
 			irq->pending_latch = false;
 
+		if (is_v2_sgi) {
+			u8 cpuid = FIELD_GET(GICH_LR_PHYSID_CPUID, val);
+
+			if (irq->active)
+				irq->active_source = cpuid;
+			if (val & ICH_LR_PENDING_BIT)
+				irq->source |= BIT(cpuid);
+		}
+
 		/* Handle resampling for mapped interrupts if required */
 		vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
 
 		irq->on_lr = false;
-
-		raw_spin_unlock(&irq->irq_lock);
-		vgic_put_irq(vcpu->kvm, irq);
 	}
 
+	vgic_put_irq(vcpu->kvm, irq);
+}
+
+void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
+
+	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
+
+	for (int lr = 0; lr < cpuif->used_lrs; lr++)
+		vgic_v3_fold_lr(vcpu, cpuif->vgic_lr[lr]);
+
 	cpuif->used_lrs = 0;
 }
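
One detail worth calling out in the conversion: the explicit
raw_spin_lock()/raw_spin_unlock() pair becomes a scoped_guard() from
<linux/cleanup.h>, which releases the lock whenever the guarded block
is exited. As a rough userspace analogue of the shape (the kernel macro
additionally handles break/return via cleanup attributes; the pthread
mutex and the macro name below are illustrative only):

	#include <pthread.h>
	#include <stdio.h>

	/* Hypothetical userspace stand-in for scoped_guard(): a for-loop
	 * that runs its body once, locking on entry and unlocking when the
	 * body finishes. Unlike the kernel macro, this sketch does not
	 * survive break/return inside the body. */
	#define scoped_lock(lock) \
		for (int _once = (pthread_mutex_lock(lock), 1); _once; \
		     _once = (pthread_mutex_unlock(lock), 0))

	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	static int counter;

	int main(void)
	{
		scoped_lock(&m) {
			counter++;	/* runs with the mutex held */
		}
		printf("counter=%d\n", counter);
		return 0;
	}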