KVM: arm64: nv: Implement HW access flag management in stage-2 SW PTW

Give the stage-2 walk similar treatment to stage-1: update the access
flag during the table walk and do so for any walk context.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Link: https://msgid.link/20251124190158.177318-14-oupton@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
Author:	Oliver Upton <oupton@kernel.org>
Date:	2025-11-24 11:01:55 -08:00
Parent:	bff8aa213d
Commit:	e4c7dfac2f

2 changed files with 42 additions and 7 deletions
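
In miniature, the change teaches the software walker to behave like FEAT_HAFDBS hardware: on reaching a valid descriptor with the access flag clear, and with the guest hypervisor having enabled HA, the walker sets AF itself via an atomic compare-and-swap instead of delivering an Access Flag fault. A minimal user-space sketch of that pattern (DESC_VALID, AF_BIT, update_access_flag and the return convention are illustrative stand-ins, not the kernel's code):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DESC_VALID	(UINT64_C(1) << 0)
	#define AF_BIT		(UINT64_C(1) << 10)	/* AF is bit 10 of a stage-2 descriptor */

	/*
	 * Set AF on a live descriptor. The cmpxchg only installs the new
	 * value if the entry is unchanged since we read it; losing the race
	 * means the caller should restart the walk.
	 */
	static int update_access_flag(_Atomic uint64_t *descp, uint64_t old)
	{
		uint64_t new = old | AF_BIT;

		if (new == old)
			return 0;	/* AF already set, nothing to do */

		return atomic_compare_exchange_strong(descp, &old, new) ? 0 : -1;
	}

	int main(void)
	{
		_Atomic uint64_t desc = DESC_VALID;	/* valid descriptor, AF clear */

		if (!update_access_flag(&desc, atomic_load(&desc)))
			printf("descriptor is now %#llx\n", (unsigned long long)atomic_load(&desc));
		return 0;
	}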

arch/arm64/kvm/mmu.c

@@ -2012,6 +2012,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 		u32 esr;
 
 		ret = kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans);
+		if (ret == -EAGAIN) {
+			ret = 1;
+			goto out_unlock;
+		}
+
 		if (ret) {
 			esr = kvm_s2_trans_esr(&nested_trans);
 			kvm_inject_s2_fault(vcpu, esr);
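
Why the -EAGAIN leg exists (a reading of the hunk, not spelled out in the commit message): the walker can now write descriptors back, and a swap that loses a race with the guest surfaces as -EAGAIN; returning 1 is KVM's "resume the guest" convention, so the faulting access is simply replayed against whatever the tables now contain. A minimal sketch of that convention, with fixup_nested_walk as a hypothetical stand-in:

	#include <errno.h>

	/* Map a walker result onto the run-loop convention (illustrative only). */
	static int fixup_nested_walk(int walk_ret)
	{
		if (walk_ret == -EAGAIN)
			return 1;	/* descriptor changed under us: re-enter the
					 * guest and let it replay the access */
		return walk_ret;	/* anything else is handled as before */
	}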

arch/arm64/kvm/nested.c

@@ -124,12 +124,13 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 }
 
 struct s2_walk_info {
-	u64 baddr;
-	unsigned int max_oa_bits;
-	unsigned int pgshift;
-	unsigned int sl;
-	unsigned int t0sz;
-	bool be;
+	u64		baddr;
+	unsigned int	max_oa_bits;
+	unsigned int	pgshift;
+	unsigned int	sl;
+	unsigned int	t0sz;
+	bool		be;
+	bool		ha;
 };
 
 static u32 compute_fsc(int level, u32 fsc)
@@ -219,6 +220,20 @@ static int read_guest_s2_desc(struct kvm_vcpu *vcpu, phys_addr_t pa, u64 *desc,
 	return 0;
 }
 
+static int swap_guest_s2_desc(struct kvm_vcpu *vcpu, phys_addr_t pa, u64 old, u64 new,
+			      struct s2_walk_info *wi)
+{
+	if (wi->be) {
+		old = cpu_to_be64(old);
+		new = cpu_to_be64(new);
+	} else {
+		old = cpu_to_le64(old);
+		new = cpu_to_le64(new);
+	}
+
+	return __kvm_at_swap_desc(vcpu->kvm, pa, old, new);
+}
+
 /*
  * This is essentially a C-version of the pseudo code from the ARM ARM
  * AArch64.TranslationTableWalk function. I strongly recommend looking at
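
Why both old and new are converted: the compare-and-swap performed by __kvm_at_swap_desc works on the descriptor's raw in-memory representation, so the expected value has to match the guest's byte order exactly, not only the value being installed. A stand-alone illustration (to_guest_order is a hypothetical stand-in for the cpu_to_be64()/cpu_to_le64() pair):

	#include <endian.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Convert a CPU-order descriptor into the guest's in-memory byte order. */
	static uint64_t to_guest_order(uint64_t desc, bool guest_is_be)
	{
		return guest_is_be ? htobe64(desc) : htole64(desc);
	}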
@@ -232,7 +247,7 @@ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
 	int first_block_level, level, stride, input_size, base_lower_bound;
 	phys_addr_t base_addr;
 	unsigned int addr_top, addr_bottom;
-	u64 desc;  /* page table entry */
+	u64 desc, new_desc;  /* page table entry */
 	int ret;
 	phys_addr_t paddr;
 
@@ -281,6 +296,8 @@ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
 		if (ret < 0)
 			return ret;
 
+		new_desc = desc;
+
 		/* Check for valid descriptor at this point */
 		if (!(desc & KVM_PTE_VALID)) {
 			out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
@@ -325,6 +342,17 @@ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
 			return 1;
 		}
 
+		if (wi->ha)
+			new_desc |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
+
+		if (new_desc != desc) {
+			ret = swap_guest_s2_desc(vcpu, paddr, desc, new_desc, wi);
+			if (ret)
+				return ret;
+
+			desc = new_desc;
+		}
+
 		if (!(desc & KVM_PTE_LEAF_ATTR_LO_S2_AF)) {
 			out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS);
 			out->desc = desc;
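
The ordering is the point of this hunk: AF is set in the local copy, swap_guest_s2_desc publishes it, and only the published value is tested, so with HA enabled an AF-clear descriptor no longer reaches the Access Flag fault path, while with HA disabled behaviour is unchanged. A compact restatement of the decision (illustrative, not kernel code):

	#include <stdbool.h>

	/* What the walker does with a valid descriptor, by HA and AF state. */
	static const char *af_outcome(bool ha, bool af_set)
	{
		if (af_set)
			return "proceed";		/* nothing to update */
		if (ha)
			return "set AF via cmpxchg, then proceed";
		return "report an Access Flag fault";	/* guest manages AF in SW */
	}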
@@ -363,6 +391,8 @@ static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_info *wi)
 	/* Global limit for now, should eventually be per-VM */
 	wi->max_oa_bits = min(get_kvm_ipa_limit(),
 			      ps_to_output_size(FIELD_GET(VTCR_EL2_PS_MASK, vtcr), false));
+
+	wi->ha = vtcr & VTCR_EL2_HA;
 }
 
 int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
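
wi->ha mirrors the guest hypervisor's VTCR_EL2.HA, the FEAT_HAFDBS enable for hardware access-flag updates at stage-2, so the walker only updates AF when the L1 hypervisor asked for hardware-equivalent behaviour. A stand-alone check (s2_hw_af_enabled is hypothetical; VTCR_EL2_HA matches the kernel's bit-21 definition):

	#include <stdbool.h>
	#include <stdint.h>

	#define VTCR_EL2_HA	(UINT64_C(1) << 21)	/* stage-2 HW access-flag enable */

	static bool s2_hw_af_enabled(uint64_t guest_vtcr)
	{
		return guest_vtcr & VTCR_EL2_HA;
	}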