On Wed, 2025-03-26 at 19:36 +0000, Yosry Ahmed wrote:
> Now that the ASID to vCPU/VMCB tracking was moved out of pre_sev_run(),
> the only remaining pieces are:
> (a) Checking for a valid VMSA.
> (b) Assigning svm->asid.
> (c) Flushing the ASID if the VMCB is run on a different physical CPU.
> 
> The check in (c) is already being done in pre_svm_run(), and so is
> redundant. (a) and (b) are small enough and probably do not warrant a
> separate helper (and (b) will be going away soon), so open-code the
> function into pre_svm_run() and remove it.
> 
> Signed-off-by: Yosry Ahmed <yosry.ahmed@xxxxxxxxx>
> ---
>  arch/x86/kvm/svm/sev.c | 28 ----------------------------
>  arch/x86/kvm/svm/svm.c | 16 ++++++++++++++--
>  arch/x86/kvm/svm/svm.h |  1 -
>  3 files changed, 14 insertions(+), 31 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 3ef0dfdbb34d2..1742f51d4c194 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -3451,34 +3451,6 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
>          svm->sev_es.ghcb = NULL;
>  }
>  
> -int pre_sev_run(struct vcpu_svm *svm, int cpu)
> -{
> -        struct kvm *kvm = svm->vcpu.kvm;
> -        unsigned int asid = sev_get_asid(kvm);
> -
> -        /*
> -         * Reject KVM_RUN if userspace attempts to run the vCPU with an invalid
> -         * VMSA, e.g. if userspace forces the vCPU to be RUNNABLE after an SNP
> -         * AP Destroy event.
> -         */
> -        if (sev_es_guest(kvm) && !VALID_PAGE(svm->vmcb->control.vmsa_pa))
> -                return -EINVAL;
> -
> -        /* Assign the asid allocated with this SEV guest */
> -        svm->asid = asid;
> -
> -        /*
> -         * Flush guest TLB if the VMCB was executed on a differet host CPU in
> -         * previous VMRUNs.
> -         */
> -        if (svm->vcpu.arch.last_vmentry_cpu == cpu)
> -                return 0;
> -
> -        vmcb_set_flush_asid(svm->vmcb);
> -        vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
> -        return 0;
> -}
> -
>  #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
>  static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
>  {
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index e6e380411fbec..ce67112732e8c 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3649,8 +3649,20 @@ static int pre_svm_run(struct kvm_vcpu *vcpu)
>                  svm->current_vmcb->cpu = vcpu->cpu;
>          }
>  
> -        if (sev_guest(vcpu->kvm))
> -                return pre_sev_run(svm, vcpu->cpu);
> +        if (sev_guest(vcpu->kvm)) {
> +                /* Assign the asid allocated with this SEV guest */
> +                svm->asid = sev_get_asid(vcpu->kvm);
> +
> +                /*
> +                 * Reject KVM_RUN if userspace attempts to run the vCPU with an invalid
> +                 * VMSA, e.g. if userspace forces the vCPU to be RUNNABLE after an SNP
> +                 * AP Destroy event.
> +                 */
> +                if (sev_es_guest(vcpu->kvm) &&
> +                    !VALID_PAGE(svm->vmcb->control.vmsa_pa))
> +                        return -EINVAL;
> +                return 0;
> +        }
>  
>          /* FIXME: handle wraparound of asid_generation */
>          if (svm->current_vmcb->asid_generation != sd->asid_generation)
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index ca38a233fa24c..3ab2a424992c1 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -760,7 +760,6 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
>  
>  /* sev.c */
>  
> -int pre_sev_run(struct vcpu_svm *svm, int cpu);
>  void sev_init_vmcb(struct vcpu_svm *svm);
>  void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
>  int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);

Reviewed-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>

Best regards,
        Maxim Levitsky
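
For context on why check (c) above is redundant: the cross-CPU case is already handled at
the top of pre_svm_run(), just before the SEV branch this patch touches. The fragment below
is a simplified sketch following the mainline shape of that code at the time, not a verbatim
quote from this series, so the exact body of the if-block (and whether it still uses
asid_generation) may differ here:

        /*
         * Sketch, not verbatim from this series: pre_svm_run() already
         * notices that this VMCB last ran on a different physical CPU and
         * forces a new ASID plus full VMCB dirtying, which is what makes
         * the last_vmentry_cpu check removed from pre_sev_run() redundant.
         */
        if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
                svm->current_vmcb->asid_generation = 0;
                vmcb_mark_all_dirty(svm->vmcb);
                svm->current_vmcb->cpu = vcpu->cpu;
        }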