On Thu, May 01, 2025, Marc Zyngier wrote: > > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c > > index 69782df3617f..834f08dfa24c 100644 > > --- a/virt/kvm/kvm_main.c > > +++ b/virt/kvm/kvm_main.c > > @@ -1368,6 +1368,40 @@ static int kvm_vm_release(struct inode *inode, struct file *filp) > > return 0; > > } > > > > +/* > > + * Try to lock all of the VM's vCPUs. > > + * Assumes that the kvm->lock is held. > > Assuming is not enough. These assertions have caught a number of bugs, and I'm not prepared to drop them. > > > + */ > > +int kvm_trylock_all_vcpus(struct kvm *kvm) > > +{ > > + struct kvm_vcpu *vcpu; > > + unsigned long i, j; > > + > > + kvm_for_each_vcpu(i, vcpu, kvm) > > + if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock)) > > + goto out_unlock; > > + return 0; > > + > > +out_unlock: > > + kvm_for_each_vcpu(j, vcpu, kvm) { > > + if (i == j) > > + break; > > + mutex_unlock(&vcpu->mutex); > > + } > > + return -EINTR; > > +} > > +EXPORT_SYMBOL_GPL(kvm_trylock_all_vcpus); > > + > > +void kvm_unlock_all_vcpus(struct kvm *kvm) > > +{ > > + struct kvm_vcpu *vcpu; > > + unsigned long i; > > + > > + kvm_for_each_vcpu(i, vcpu, kvm) > > + mutex_unlock(&vcpu->mutex); > > +} > > +EXPORT_SYMBOL_GPL(kvm_unlock_all_vcpus); > > I don't mind you not including the assertions in these helpers, I do :-) I see no reason not to add assertions here; if locking all vCPUs is a hot path, we've probably got bigger problems.