On Tue, Apr 22, 2025, Zack Rusin wrote: > diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c > index 04c375bf1ac2..74c472e51479 100644 > --- a/arch/x86/kvm/svm/nested.c > +++ b/arch/x86/kvm/svm/nested.c > @@ -22,6 +22,7 @@ > #include <asm/debugreg.h> > > #include "kvm_emulate.h" > +#include "kvm_vmware.h" > #include "trace.h" > #include "mmu.h" > #include "x86.h" > @@ -1517,6 +1518,11 @@ int nested_svm_exit_special(struct vcpu_svm *svm) > svm->vcpu.arch.apf.host_apf_flags) > /* Trap async PF even if not shadowing */ > return NESTED_EXIT_HOST; > +#ifdef CONFIG_KVM_VMWARE > + else if ((exit_code == (SVM_EXIT_EXCP_BASE + GP_VECTOR)) && > + kvm_vmware_wants_backdoor_to_l0(vcpu, to_svm(vcpu)->vmcb->save.cpl)) > + return NESTED_EXIT_HOST; > +#endif Either provide a stub, or do else if (IS_ENABLED(CONFIG_KVM_VMWARE) && ...). Don't do both. And definitely don't add a stub and #ifdef (some) callers. I'd say just drop the #ifdef and rely on the kvm_vmware_wants_backdoor_to_l0() stub to get the compiler to optimize out the entire else-if branch. > @@ -6386,6 +6387,11 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, > return true; > else if (is_ve_fault(intr_info)) > return true; > +#ifdef CONFIG_KVM_VMWARE > + else if (is_gp_fault(intr_info) && > + kvm_vmware_wants_backdoor_to_l0(vcpu, vmx_get_cpl(vcpu))) > + return true; > +#endif Same thing here.