svm_flush_tlb_asid() currently operates on the current VMCB. In
preparation for properly tracking TLB flushes for L1 and L2 ASIDs,
refactor it to take is_guest_mode and find the proper VMCB. All existing
callers pass is_guest_mode(vcpu) to maintain existing behavior for now.

Move the comment about only flushing the current ASID to
svm_flush_tlb_all(), where it probably should have been anyway, because
svm_flush_tlb_asid() now flushes a given ASID, not the current ASID.

Create a svm_flush_tlb_guest() wrapper to use as the flush_tlb_guest()
callback.

No functional change intended.

Signed-off-by: Yosry Ahmed <yosry.ahmed@xxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 865c5ce4fa473..fb6b9f88a1504 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4016,25 +4016,24 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 }
 
-static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
+static struct vmcb *svm_get_vmcb(struct vcpu_svm *svm, bool is_guest_mode)
+{
+	return is_guest_mode ? svm->nested.vmcb02.ptr : svm->vmcb01.ptr;
+}
+
+static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu, bool is_guest_mode)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *vmcb = svm_get_vmcb(svm, is_guest_mode);
 
 	/*
 	 * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
 	 * A TLB flush for the current ASID flushes both "host" and "guest" TLB
 	 * entries, and thus is a superset of Hyper-V's fine grained flushing.
 	 */
-	kvm_hv_vcpu_purge_flush_tlb(vcpu, is_guest_mode(vcpu));
-
-	/*
-	 * Flush only the current ASID even if the TLB flush was invoked via
-	 * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all
-	 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
-	 * unconditionally does a TLB flush on both nested VM-Enter and nested
-	 * VM-Exit (via kvm_mmu_reset_context()).
-	 */
-	vmcb_set_flush_asid(svm->vmcb);
+	kvm_hv_vcpu_purge_flush_tlb(vcpu, is_guest_mode);
+	if (vmcb)
+		vmcb_set_flush_asid(vmcb);
 }
 
 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
@@ -4050,7 +4049,7 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
 	if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
 		hyperv_flush_guest_mapping(root_tdp);
 
-	svm_flush_tlb_asid(vcpu);
+	svm_flush_tlb_asid(vcpu, is_guest_mode(vcpu));
 }
 
 static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
@@ -4065,7 +4064,14 @@ static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
 	if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
 		hv_flush_remote_tlbs(vcpu->kvm);
 
-	svm_flush_tlb_asid(vcpu);
+	/*
+	 * Flush only the current ASID even if the TLB flush was invoked via
+	 * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all
+	 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
+	 * unconditionally does a TLB flush on both nested VM-Enter and nested
+	 * VM-Exit (via kvm_mmu_reset_context()).
+	 */
+	svm_flush_tlb_asid(vcpu, is_guest_mode(vcpu));
 }
 
 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
@@ -4075,6 +4081,11 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
 	invlpga(gva, svm_get_current_asid(svm));
 }
 
+static void svm_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+	svm_flush_tlb_asid(vcpu, is_guest_mode(vcpu));
+}
+
 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -5187,7 +5198,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.flush_tlb_all = svm_flush_tlb_all,
 	.flush_tlb_current = svm_flush_tlb_current,
 	.flush_tlb_gva = svm_flush_tlb_gva,
-	.flush_tlb_guest = svm_flush_tlb_asid,
+	.flush_tlb_guest = svm_flush_tlb_guest,
 
 	.vcpu_pre_run = svm_vcpu_pre_run,
 	.vcpu_run = svm_vcpu_run,
-- 
2.49.0.395.g12beb8f557-goog