Since we are no longer switching from a BSCA to an ESCA, we can get rid
of the sca_lock completely. The write lock was only taken for that
conversion. With the lock gone, some local code cleanups become
possible.

Signed-off-by: Christoph Schlameuss <schlameuss@xxxxxxxxxxxxx>
---
This patch depends on "KVM: s390: Use ESCA instead of BSCA at VM init".
Link: https://lore.kernel.org/r/20250603-rm-bsca-v5-1-f691288ada5c@xxxxxxxxxxxxx

Checkpatch fails for a preexisting BUG macro.
---
Changes in v2:
- rebase on 20250513-rm-bsca-ab1e8649aca7:v7
- directly init new_val with correct value in sca_inject_ext_call()
- Link to v1: https://lore.kernel.org/r/20250603-rm-sca-lock-v1-1-9793548480ea@xxxxxxxxxxxxx
---
 arch/s390/include/asm/kvm_host.h |  1 -
 arch/s390/kvm/gaccess.c          | 19 ++-----------------
 arch/s390/kvm/interrupt.c        | 36 +++++++++---------------------------
 arch/s390/kvm/kvm-s390.c         | 34 +++++++++------------------------
 4 files changed, 20 insertions(+), 70 deletions(-)

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 4d651e6e8b12ecd7796070e9da659b0b2b94d302..b6761a9aaed73233dc4138462c71cf0cdf2ef56a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -633,7 +633,6 @@ struct kvm_s390_pv {

 struct kvm_arch {
 	struct esca_block *sca;
-	rwlock_t sca_lock;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
 	struct kvm_device *flic;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 586c0c5ecf131c2298e662aa85d4c9ae127738d9..6fdb4d04316a70d122636312ed119f0d2d3a698e 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -109,14 +109,9 @@ struct aste {

 int ipte_lock_held(struct kvm *kvm)
 {
-	if (sclp.has_siif) {
-		int rc;
+	if (sclp.has_siif)
+		return kvm->arch.sca->ipte_control.kh != 0;

-		read_lock(&kvm->arch.sca_lock);
-		rc = kvm->arch.sca->ipte_control.kh != 0;
-		read_unlock(&kvm->arch.sca_lock);
-		return rc;
-	}
 	return kvm->arch.ipte_lock_count != 0;
 }

@@ -129,19 +124,16 @@ static void ipte_lock_simple(struct kvm *kvm)
 	if (kvm->arch.ipte_lock_count > 1)
 		goto out;
 retry:
-	read_lock(&kvm->arch.sca_lock);
 	ic = &kvm->arch.sca->ipte_control;
 	old = READ_ONCE(*ic);
 	do {
 		if (old.k) {
-			read_unlock(&kvm->arch.sca_lock);
 			cond_resched();
 			goto retry;
 		}
 		new = old;
 		new.k = 1;
 	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 out:
 	mutex_unlock(&kvm->arch.ipte_mutex);
 }
@@ -154,14 +146,12 @@ static void ipte_unlock_simple(struct kvm *kvm)
 	kvm->arch.ipte_lock_count--;
 	if (kvm->arch.ipte_lock_count)
 		goto out;
-	read_lock(&kvm->arch.sca_lock);
 	ic = &kvm->arch.sca->ipte_control;
 	old = READ_ONCE(*ic);
 	do {
 		new = old;
 		new.k = 0;
 	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 	wake_up(&kvm->arch.ipte_wq);
 out:
 	mutex_unlock(&kvm->arch.ipte_mutex);
@@ -172,12 +162,10 @@ static void ipte_lock_siif(struct kvm *kvm)
 	union ipte_control old, new, *ic;

 retry:
-	read_lock(&kvm->arch.sca_lock);
 	ic = &kvm->arch.sca->ipte_control;
 	old = READ_ONCE(*ic);
 	do {
 		if (old.kg) {
-			read_unlock(&kvm->arch.sca_lock);
 			cond_resched();
 			goto retry;
 		}
@@ -185,14 +173,12 @@ static void ipte_lock_siif(struct kvm *kvm)
 		new.k = 1;
 		new.kh++;
 	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 }

 static void ipte_unlock_siif(struct kvm *kvm)
 {
 	union ipte_control old, new, *ic;

-	read_lock(&kvm->arch.sca_lock);
 	ic = &kvm->arch.sca->ipte_control;
 	old = READ_ONCE(*ic);
 	do {
@@ -201,7 +187,6 @@ static void ipte_unlock_siif(struct kvm *kvm)
 		if (!new.kh)
 			new.k = 0;
 	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 	if (!new.kh)
 		wake_up(&kvm->arch.ipte_wq);
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f2d29c99253b6f2e804ebc9055ac5447efb9a427..283af13314a292d9b7ae71580592d913544d10fe 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -44,48 +44,34 @@ static struct kvm_s390_gib *gib;
 /* handle external calls via sigp interpretation facility */
 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
-	union esca_sigp_ctrl sigp_ctrl;
-	struct esca_block *sca;
-	int c, scn;
+	struct esca_block *sca = vcpu->kvm->arch.sca;
+	union esca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;

 	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
 		return 0;

 	BUG_ON(!kvm_s390_use_sca_entries());
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
-	sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
-
-	c = sigp_ctrl.c;
-	scn = sigp_ctrl.scn;
-	read_unlock(&vcpu->kvm->arch.sca_lock);

 	if (src_id)
-		*src_id = scn;
+		*src_id = sigp_ctrl.scn;

-	return c;
+	return sigp_ctrl.c;
 }

 static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 {
-	union esca_sigp_ctrl old_val, new_val = {0};
-	union esca_sigp_ctrl *sigp_ctrl;
-	struct esca_block *sca;
+	struct esca_block *sca = vcpu->kvm->arch.sca;
+	union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+	union esca_sigp_ctrl old_val, new_val = {.scn = src_id, .c = 1};
 	int expect, rc;

 	BUG_ON(!kvm_s390_use_sca_entries());
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
-	sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 	old_val = READ_ONCE(*sigp_ctrl);
-	new_val.scn = src_id;
-	new_val.c = 1;
 	old_val.c = 0;
 	expect = old_val.value;
 	rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
-	read_unlock(&vcpu->kvm->arch.sca_lock);

 	if (rc != expect) {
 		/* another external call is pending */
@@ -97,18 +83,14 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)

 static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 {
-	union esca_sigp_ctrl *sigp_ctrl;
-	struct esca_block *sca;
+	struct esca_block *sca = vcpu->kvm->arch.sca;
+	union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;

 	if (!kvm_s390_use_sca_entries())
 		return;

 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
-	sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 	WRITE_ONCE(sigp_ctrl->value, 0);
-	read_unlock(&vcpu->kvm->arch.sca_lock);
 }

 int psw_extint_disabled(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 93b0497c7618e34009b15d291e33dc425e46992f..b04307b762017ee57625c4c35327f0afd9eb43b1 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1937,14 +1937,12 @@ static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
 	union sca_utility new, old;
 	struct esca_block *sca;

-	read_lock(&kvm->arch.sca_lock);
 	sca = kvm->arch.sca;
 	old = READ_ONCE(sca->utility);
 	do {
 		new = old;
 		new.mtcr = val;
 	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
-	read_unlock(&kvm->arch.sca_lock);
 }

 static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
@@ -1965,9 +1963,7 @@ static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
 	if (!test_kvm_facility(kvm, 11))
 		return -ENXIO;

-	read_lock(&kvm->arch.sca_lock);
 	topo = kvm->arch.sca->utility.mtcr;
-	read_unlock(&kvm->arch.sca_lock);

 	return put_user(topo, (u8 __user *)attr->addr);
 }
@@ -3344,7 +3340,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (!sclp.has_64bscao)
 		alloc_flags |= GFP_DMA;

-	rwlock_init(&kvm->arch.sca_lock);
 	mutex_lock(&kvm_lock);
 	kvm->arch.sca = alloc_pages_exact(sizeof(*kvm->arch.sca), alloc_flags);
@@ -3529,41 +3524,30 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)

 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
 {
-	struct esca_block *sca;
+	struct esca_block *sca = vcpu->kvm->arch.sca;

 	if (!kvm_s390_use_sca_entries())
 		return;

-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
 	clear_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
 	sca->cpu[vcpu->vcpu_id].sda = 0;
-	read_unlock(&vcpu->kvm->arch.sca_lock);
 }

 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 {
-	struct esca_block *sca;
-	phys_addr_t sca_phys;
-
-	if (!kvm_s390_use_sca_entries()) {
-		sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
-
-		/* we still need the basic sca for the ipte control */
-		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
-		vcpu->arch.sie_block->scaol = sca_phys;
-		return;
-	}
-	read_lock(&vcpu->kvm->arch.sca_lock);
-	sca = vcpu->kvm->arch.sca;
-	sca_phys = virt_to_phys(sca);
+	struct esca_block *sca = vcpu->kvm->arch.sca;
+	phys_addr_t sca_phys = virt_to_phys(sca);

-	sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
+	/* we still need the sca header for the ipte control */
 	vcpu->arch.sie_block->scaoh = sca_phys >> 32;
 	vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
 	vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
+
+	if (!kvm_s390_use_sca_entries())
+		return;
+
 	set_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
-	read_unlock(&vcpu->kvm->arch.sca_lock);
+	sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
 }

 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)

---
base-commit: ec7714e4947909190ffb3041a03311a975350fe0
change-id: 20250602-rm-sca-lock-d7c1eca252b1
prerequisite-change-id: 20250513-rm-bsca-ab1e8649aca7:v7
prerequisite-patch-id: 52dadcc65bc9fddfee7499ed55a3fa909051ab1c

Best regards,
-- 
Christoph Schlameuss <schlameuss@xxxxxxxxxxxxx>
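
P.S.: A note for readers who want to convince themselves that dropping
the rwlock is safe: sca_lock only guarded against the SCA block being
replaced under a reader during the old BSCA-to-ESCA conversion. The
field updates themselves were already lock-free cmpxchg retry loops,
and those stay correct on their own once the SCA pointer is fixed for
the VM's lifetime. The sketch below models that retry pattern with C11
atomics in userspace. It is illustrative only: the bit layout (IC_K,
the position of the kh holder count) and the function names are made
up, and the real ipte_lock_siif()/ipte_unlock_siif() additionally honor
the guest-owned kg bit, cond_resched() while waiting, and wake up
sleepers on the ipte wait queue.

/*
 * Userspace sketch of the lock-free ipte_control update pattern.
 * The layout of the control word is hypothetical, not the real
 * s390 ipte_control format.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define IC_K		(1ULL << 63)	/* "lock held" bit, like ipte_control.k */
#define IC_KH_SHIFT	32		/* hypothetical position of the kh count */
#define IC_KH_MASK	(0xffULL << IC_KH_SHIFT)

static _Atomic uint64_t ipte_control;	/* stands in for sca->ipte_control.val */

static void ipte_lock(void)
{
	uint64_t old = atomic_load(&ipte_control);
	uint64_t new;

	/* set k and bump the holder count, mirroring ipte_lock_siif() */
	do {
		new = (old | IC_K) + (1ULL << IC_KH_SHIFT);
	} while (!atomic_compare_exchange_weak(&ipte_control, &old, new));
}

static void ipte_unlock(void)
{
	uint64_t old = atomic_load(&ipte_control);
	uint64_t new;

	do {
		new = old - (1ULL << IC_KH_SHIFT);	/* drop one holder */
		if (!(new & IC_KH_MASK))
			new &= ~IC_K;			/* last holder clears k */
	} while (!atomic_compare_exchange_weak(&ipte_control, &old, new));
}

int main(void)
{
	ipte_lock();
	ipte_lock();	/* nested hold: kh = 2 */
	ipte_unlock();
	ipte_unlock();
	/* prints 0: the last unlock cleared both k and the holder count */
	printf("ipte_control: %#llx\n",
	       (unsigned long long)atomic_load(&ipte_control));
	return 0;
}

The point of the pattern: on a failed compare-exchange the loop re-reads
the current value and recomputes the update from it, so no concurrent
update is ever lost. The read lock never protected the word itself; it
only pinned the pointer to it, and that pointer is now immutable.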