KVM: s390: Remove sca_lock

Since we are no longer switching from a BSCA to an ESCA, we can get rid of
the sca_lock completely. The write lock was only taken for that conversion.
After removal of the lock, some local code cleanups are possible.

Signed-off-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Suggested-by: Janosch Frank <frankja@linux.ibm.com>
[frankja@linux.ibm.com: Added suggested-by tag as discussed on list]
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>

commit 14542a0a54
parent e72753ed12
committed by Janosch Frank
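
Why this is safe: kvm->arch.sca is allocated once in kvm_arch_init_vm() and
never reassigned, so readers may dereference the pointer without holding a
lock; all cross-vCPU synchronization happens on words inside the SCA via
compare-and-swap, as the try_cmpxchg()/cmpxchg() loops in the diff below
show. The following is a minimal userspace sketch of that ipte-lock
pattern, assuming GCC/Clang __atomic builtins; the union layout and all
names are simplified stand-ins, not the kernel's API.

/*
 * Minimal userspace sketch (not kernel code) of the lockless ipte-lock
 * pattern. Invariant borrowed from the patch: the SCA (and thus *ic) is
 * allocated once and never reallocated, so no rwlock is needed to pin the
 * pointer while we operate on the word it points to.
 */
#include <stdint.h>
#include <sched.h>

union ipte_control {
	struct {
		uint64_t k : 1;		/* lock bit, stand-in for ipte_control.k */
		uint64_t unused : 63;
	};
	uint64_t val;
};

static void ipte_lock_sketch(union ipte_control *ic)
{
	union ipte_control old, new;

	old.val = __atomic_load_n(&ic->val, __ATOMIC_RELAXED);
	for (;;) {
		if (old.k) {
			/* held elsewhere; stand-in for cond_resched() */
			sched_yield();
			old.val = __atomic_load_n(&ic->val, __ATOMIC_RELAXED);
			continue;
		}
		new = old;
		new.k = 1;
		/* on failure, old.val is refreshed with the current value */
		if (__atomic_compare_exchange_n(&ic->val, &old.val, new.val,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return;		/* k was 0 and is now 1: acquired */
	}
}

The sketch makes the division of labor explicit: the compare-and-swap alone
serializes updates to the control word, so the only thing the old
read_lock() ever pinned was the SCA pointer itself, and that pointer can no
longer change.
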
arch/s390/include/asm/kvm_host.h
@@ -633,7 +633,6 @@ struct kvm_s390_pv {
 
 struct kvm_arch {
 	struct esca_block *sca;
-	rwlock_t sca_lock;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
 	struct kvm_device *flic;
arch/s390/kvm/gaccess.c
@@ -109,14 +109,9 @@ struct aste {
 
 int ipte_lock_held(struct kvm *kvm)
 {
-	if (sclp.has_siif) {
-		int rc;
+	if (sclp.has_siif)
+		return kvm->arch.sca->ipte_control.kh != 0;
 
-		read_lock(&kvm->arch.sca_lock);
-		rc = kvm->arch.sca->ipte_control.kh != 0;
-		read_unlock(&kvm->arch.sca_lock);
-		return rc;
-	}
 	return kvm->arch.ipte_lock_count != 0;
 }
 
@@ -129,19 +124,16 @@ static void ipte_lock_simple(struct kvm *kvm)
 	if (kvm->arch.ipte_lock_count > 1)
 		goto out;
 retry:
-	read_lock(&kvm->arch.sca_lock);
 	ic = &kvm->arch.sca->ipte_control;
 	old = READ_ONCE(*ic);
 	do {
 		if (old.k) {
-			read_unlock(&kvm->arch.sca_lock);
 			cond_resched();
 			goto retry;
 		}
 		new = old;
 		new.k = 1;
 	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 out:
 	mutex_unlock(&kvm->arch.ipte_mutex);
 }
@@ -154,14 +146,12 @@ static void ipte_unlock_simple(struct kvm *kvm)
 	kvm->arch.ipte_lock_count--;
 	if (kvm->arch.ipte_lock_count)
 		goto out;
-	read_lock(&kvm->arch.sca_lock);
 	ic = &kvm->arch.sca->ipte_control;
 	old = READ_ONCE(*ic);
 	do {
 		new = old;
 		new.k = 0;
 	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 	wake_up(&kvm->arch.ipte_wq);
 out:
 	mutex_unlock(&kvm->arch.ipte_mutex);
@@ -172,12 +162,10 @@ static void ipte_lock_siif(struct kvm *kvm)
 	union ipte_control old, new, *ic;
 
 retry:
-	read_lock(&kvm->arch.sca_lock);
 	ic = &kvm->arch.sca->ipte_control;
 	old = READ_ONCE(*ic);
 	do {
 		if (old.kg) {
-			read_unlock(&kvm->arch.sca_lock);
 			cond_resched();
 			goto retry;
 		}
@@ -185,14 +173,12 @@ retry:
 		new.k = 1;
 		new.kh++;
 	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 }
 
 static void ipte_unlock_siif(struct kvm *kvm)
 {
 	union ipte_control old, new, *ic;
 
-	read_lock(&kvm->arch.sca_lock);
 	ic = &kvm->arch.sca->ipte_control;
 	old = READ_ONCE(*ic);
 	do {
@@ -201,7 +187,6 @@ static void ipte_unlock_siif(struct kvm *kvm)
 		if (!new.kh)
 			new.k = 0;
 	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 	if (!new.kh)
 		wake_up(&kvm->arch.ipte_wq);
 }
arch/s390/kvm/interrupt.c
@@ -45,48 +45,34 @@ static struct kvm_s390_gib *gib;
 /* handle external calls via sigp interpretation facility */
 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
-	union esca_sigp_ctrl sigp_ctrl;
-	struct esca_block *sca;
-	int c, scn;
+	struct esca_block *sca = vcpu->kvm->arch.sca;
+	union esca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
 	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
 		return 0;
 
 	BUG_ON(!kvm_s390_use_sca_entries());
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
-	sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
-
-	c = sigp_ctrl.c;
-	scn = sigp_ctrl.scn;
-	read_unlock(&vcpu->kvm->arch.sca_lock);
 
 	if (src_id)
-		*src_id = scn;
+		*src_id = sigp_ctrl.scn;
 
-	return c;
+	return sigp_ctrl.c;
 }
 
 static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 {
-	union esca_sigp_ctrl old_val, new_val = {0};
-	union esca_sigp_ctrl *sigp_ctrl;
-	struct esca_block *sca;
+	struct esca_block *sca = vcpu->kvm->arch.sca;
+	union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+	union esca_sigp_ctrl old_val, new_val = {.scn = src_id, .c = 1};
 	int expect, rc;
 
 	BUG_ON(!kvm_s390_use_sca_entries());
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
-	sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
 	old_val = READ_ONCE(*sigp_ctrl);
-	new_val.scn = src_id;
-	new_val.c = 1;
 	old_val.c = 0;
 
 	expect = old_val.value;
 	rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
-	read_unlock(&vcpu->kvm->arch.sca_lock);
 
 	if (rc != expect) {
 		/* another external call is pending */
@@ -98,18 +84,14 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 
 static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 {
-	union esca_sigp_ctrl *sigp_ctrl;
-	struct esca_block *sca;
+	struct esca_block *sca = vcpu->kvm->arch.sca;
+	union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
 	if (!kvm_s390_use_sca_entries())
 		return;
 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
-	sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
 	WRITE_ONCE(sigp_ctrl->value, 0);
-	read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
 int psw_extint_disabled(struct kvm_vcpu *vcpu)
arch/s390/kvm/kvm-s390.c
@@ -1938,14 +1938,12 @@ static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
 	union sca_utility new, old;
 	struct esca_block *sca;
 
-	read_lock(&kvm->arch.sca_lock);
 	sca = kvm->arch.sca;
 	old = READ_ONCE(sca->utility);
 	do {
 		new = old;
 		new.mtcr = val;
 	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 }
 
 static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
@@ -1966,9 +1964,7 @@ static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
 	if (!test_kvm_facility(kvm, 11))
 		return -ENXIO;
 
-	read_lock(&kvm->arch.sca_lock);
 	topo = kvm->arch.sca->utility.mtcr;
-	read_unlock(&kvm->arch.sca_lock);
 
 	return put_user(topo, (u8 __user *)attr->addr);
 }
@@ -3345,7 +3341,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	if (!sclp.has_64bscao)
 		alloc_flags |= GFP_DMA;
-	rwlock_init(&kvm->arch.sca_lock);
 	mutex_lock(&kvm_lock);
 
 	kvm->arch.sca = alloc_pages_exact(sizeof(*kvm->arch.sca), alloc_flags);
@@ -3530,41 +3525,30 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
 {
-	struct esca_block *sca;
+	struct esca_block *sca = vcpu->kvm->arch.sca;
 
 	if (!kvm_s390_use_sca_entries())
 		return;
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
 
 	clear_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
 	sca->cpu[vcpu->vcpu_id].sda = 0;
-	read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 {
-	struct esca_block *sca;
-	phys_addr_t sca_phys;
+	struct esca_block *sca = vcpu->kvm->arch.sca;
+	phys_addr_t sca_phys = virt_to_phys(sca);
 
-	if (!kvm_s390_use_sca_entries()) {
-		sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
-
-		/* we still need the basic sca for the ipte control */
-		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
-		vcpu->arch.sie_block->scaol = sca_phys;
-		return;
-	}
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
-	sca_phys = virt_to_phys(sca);
-
-	sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
 	/* we still need the sca header for the ipte control */
 	vcpu->arch.sie_block->scaoh = sca_phys >> 32;
 	vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
 	vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
 
+	if (!kvm_s390_use_sca_entries())
+		return;
+
 	set_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
-	read_unlock(&vcpu->kvm->arch.sca_lock);
+	sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
 }
 
 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
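
The external-call hunks in arch/s390/kvm/interrupt.c above follow the same
reasoning: the cmpxchg() on the sigp_ctrl word is what makes injection
atomic, with or without sca_lock. Below is a hedged userspace sketch of
that inject pattern, again using GCC/Clang __atomic builtins and a
simplified field layout rather than the kernel's union esca_sigp_ctrl.

/*
 * Userspace sketch (not kernel code) of the sca_inject_ext_call() update.
 * The compare-exchange succeeds only if no external call was pending
 * (c == 0); the surrounding read_lock() never contributed to this.
 */
#include <errno.h>
#include <stdint.h>

union sigp_ctrl {
	struct {
		uint32_t c : 1;		/* external call pending */
		uint32_t unused : 15;
		uint32_t scn : 16;	/* source cpu number */
	};
	uint32_t value;
};

static int inject_ext_call_sketch(union sigp_ctrl *ctrl, uint16_t src_id)
{
	union sigp_ctrl new = { .scn = src_id, .c = 1 };
	union sigp_ctrl old;

	old.value = __atomic_load_n(&ctrl->value, __ATOMIC_RELAXED);
	old.c = 0;	/* expect "no call pending"; fail otherwise */

	if (!__atomic_compare_exchange_n(&ctrl->value, &old.value, new.value,
					 false, __ATOMIC_ACQ_REL,
					 __ATOMIC_RELAXED))
		return -EBUSY;	/* another external call is pending */
	return 0;
}

If another call is already marked pending (c == 1), the expected value
(with c forced to 0) no longer matches and the exchange fails, which
corresponds to the rc != expect case in the kernel code.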