On Tue, Apr 08, 2025 at 09:41:34PM -0400, Maxim Levitsky wrote: > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c > index 69782df3617f..71c0d8c35b4b 100644 > --- a/virt/kvm/kvm_main.c > +++ b/virt/kvm/kvm_main.c > @@ -1368,6 +1368,77 @@ static int kvm_vm_release(struct inode *inode, struct file *filp) > return 0; > } > > + > +/* > + * Lock all VM vCPUs. > + * Can be used nested (to lock vCPUS of two VMs for example) > + */ > +int kvm_lock_all_vcpus_nested(struct kvm *kvm, bool trylock, unsigned int role) > +{ > + struct kvm_vcpu *vcpu; > + unsigned long i, j; > + > + lockdep_assert_held(&kvm->lock); > + > + kvm_for_each_vcpu(i, vcpu, kvm) { > + > + if (trylock && !mutex_trylock_nested(&vcpu->mutex, role)) > + goto out_unlock; > + else if (!trylock && mutex_lock_killable_nested(&vcpu->mutex, role)) > + goto out_unlock; > + > +#ifdef CONFIG_PROVE_LOCKING > + if (!i) > + /* > + * Reset the role to one that avoids colliding with > + * the role used for the first vcpu mutex. > + */ > + role = MAX_LOCK_DEPTH - 1; > + else > + mutex_release(&vcpu->mutex.dep_map, _THIS_IP_); > +#endif > + } This code is all sorts of terrible. Per the lockdep_assert_held() above, you serialize all these locks by holding that lock; this means you can be using the _nest_lock() annotation. Also, the original code didn't have this trylock nonsense, and the Changelog doesn't mention this -- in fact the Changelog claims no change, which is patently false. Anyway, please write it like: kvm_for_each_vcpu(i, vcpu, kvm) { if (mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock)) goto unlock; } return 0; unlock: kvm_for_each_vcpu(j, vcpu, kvm) { if (j == i) break; mutex_unlock(&vcpu->mutex); } return -EINTR; And yes, you'll have to add mutex_lock_killable_nest_lock(), but that should be trivial.