On Wed, 2025-03-26 at 19:36 +0000, Yosry Ahmed wrote:
> Refactor a helper out of kvm_mmu_invalidate_addr() that allows skipping
> the gva flush. This will be used when an invalidation is needed but the
> GVA TLB translations that require invalidation are not of the current
> context (e.g. when emulating INVLPGA for L1 to flush L2's translations).
> 
> No functional change intended.
> 
> Signed-off-by: Yosry Ahmed <yosry.ahmed@xxxxxxxxx>
> ---
>  arch/x86/kvm/mmu/mmu.c | 12 +++++++++---
>  1 file changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 4a72ada0a7585..e2b1994f12753 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6355,15 +6355,15 @@ static void kvm_mmu_invalidate_addr_in_root(struct kvm_vcpu *vcpu,
>  	write_unlock(&vcpu->kvm->mmu_lock);
>  }
>  
> -void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> -			     u64 addr, unsigned long roots)
> +static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> +				      u64 addr, unsigned long roots, bool gva_flush)
>  {
>  	int i;
>  
>  	WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
>  
>  	/* It's actually a GPA for vcpu->arch.guest_mmu. */
> -	if (mmu != &vcpu->arch.guest_mmu) {
> +	if (gva_flush && mmu != &vcpu->arch.guest_mmu) {
>  		/* INVLPG on a non-canonical address is a NOP according to the SDM. */
>  		if (is_noncanonical_invlpg_address(addr, vcpu))
>  			return;
> @@ -6382,6 +6382,12 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>  			kvm_mmu_invalidate_addr_in_root(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
>  	}
>  }
> +
> +void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> +			     u64 addr, unsigned long roots)
> +{
> +	__kvm_mmu_invalidate_addr(vcpu, mmu, addr, roots, true);
> +}
>  EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
>  
>  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)

Reviewed-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>

Best regards,
	Maxim Levitsky
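
For illustration only (not part of this patch as posted): a minimal sketch of how a
later caller inside arch/x86/kvm/mmu/mmu.c might use the new helper to skip the gva
flush. The wrapper name and the choice of mmu/roots below are hypothetical; the real
call site in the series decides those.

/*
 * Hypothetical caller sketch: sync the SPTEs for an address whose GVA
 * translations are not of the current context, e.g. an L2 address while
 * emulating INVLPGA for L1. Passing gva_flush=false skips the
 * non-canonical INVLPG check and the GVA TLB flush, which only apply to
 * the current context.
 */
static void invalidate_addr_skip_gva_flush(struct kvm_vcpu *vcpu,
					   struct kvm_mmu *mmu, u64 addr,
					   unsigned long roots)
{
	__kvm_mmu_invalidate_addr(vcpu, mmu, addr, roots, false);
}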