Mirror of https://github.com/torvalds/linux.git, synced 2025-12-07 20:06:24 +00:00
KVM: arm64: Implement HW access flag management in stage-1 SW PTW
Atomically update the Access flag at stage-1 when the guest has configured the MMU to do so. Make the implementation choice (and liberal interpretation of speculation) that any access type updates the Access flag, including AT and CMO instructions. Restart the entire walk by returning to the exception-generating instruction in the case of a failed Access flag update. Reviewed-by: Marc Zyngier <maz@kernel.org> Tested-by: Marc Zyngier <maz@kernel.org> Link: https://msgid.link/20251124190158.177318-13-oupton@kernel.org Signed-off-by: Oliver Upton <oupton@kernel.org>
This commit is contained in:
@@ -353,6 +353,7 @@ struct s1_walk_info {
 	bool	be;
 	bool	s2;
 	bool	pa52bit;
+	bool	ha;
 };

 struct s1_walk_result {
||||
@@ -346,6 +346,8 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,

 	wi->baddr &= GENMASK_ULL(wi->max_oa_bits - 1, x);

+	wi->ha = tcr & TCR_HA;
+
 	return 0;

 addrsz:
||||
@@ -380,10 +382,24 @@ static int kvm_read_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 *desc,
 	return 0;
 }

||||
/*
 * Atomically replace the stage-1 descriptor at guest PA @pa, swapping
 * @old for @new.  Both values are first converted to the endianness the
 * guest has configured for its translation tables (wi->be) so that the
 * compare-and-swap operates on the in-memory representation.
 *
 * Returns 0 on success or a negative error code from the underlying
 * atomic swap (e.g. when the descriptor changed under our feet).
 */
static int kvm_swap_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 old, u64 new,
			    struct s1_walk_info *wi)
{
	u64 enc_old, enc_new;

	/* Encode both descriptors in the guest's table endianness. */
	if (wi->be) {
		enc_old = cpu_to_be64(old);
		enc_new = cpu_to_be64(new);
	} else {
		enc_old = cpu_to_le64(old);
		enc_new = cpu_to_le64(new);
	}

	return __kvm_at_swap_desc(vcpu->kvm, pa, enc_old, enc_new);
}
 static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 		   struct s1_walk_result *wr, u64 va)
 {
-	u64 va_top, va_bottom, baddr, desc;
+	u64 va_top, va_bottom, baddr, desc, new_desc, ipa;
 	int level, stride, ret;

 	level = wi->sl;
@@ -393,7 +409,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 	va_top = get_ia_size(wi) - 1;

 	while (1) {
-		u64 index, ipa;
+		u64 index;

 		va_bottom = (3 - level) * stride + wi->pgshift;
 		index = (va & GENMASK_ULL(va_top, va_bottom)) >> (va_bottom - 3);
|
||||
@@ -438,6 +454,8 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 			return ret;
 		}

+		new_desc = desc;
+
 		/* Invalid descriptor */
 		if (!(desc & BIT(0)))
 			goto transfault;
||||
@@ -490,6 +508,17 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 	if (check_output_size(baddr & GENMASK(52, va_bottom), wi))
 		goto addrsz;

+	if (wi->ha)
+		new_desc |= PTE_AF;
+
+	if (new_desc != desc) {
+		ret = kvm_swap_s1_desc(vcpu, ipa, desc, new_desc, wi);
+		if (ret)
+			return ret;
+
+		desc = new_desc;
+	}
+
 	if (!(desc & PTE_AF)) {
 		fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false);
 		return -EACCES;
||||
Reference in New Issue
Block a user