On 6/5/2025 2:14 PM, Anup Patel wrote: > The kvm_riscv_local_tlb_sanitize() deals with sanitizing current > VMID related TLB mappings when a VCPU is moved from one host CPU > to another. > > Let's move kvm_riscv_local_tlb_sanitize() to VMID management > sources and rename it to kvm_riscv_gstage_vmid_sanitize(). > > Signed-off-by: Anup Patel <apatel@xxxxxxxxxxxxxxxx> > --- > arch/riscv/include/asm/kvm_host.h | 3 +-- > arch/riscv/kvm/tlb.c | 23 ----------------------- > arch/riscv/kvm/vcpu.c | 4 ++-- > arch/riscv/kvm/vmid.c | 23 +++++++++++++++++++++++ > 4 files changed, 26 insertions(+), 27 deletions(-) > > diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h > index 85cfebc32e4c..134adc30af52 100644 > --- a/arch/riscv/include/asm/kvm_host.h > +++ b/arch/riscv/include/asm/kvm_host.h > @@ -327,8 +327,6 @@ void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid, > unsigned long order); > void kvm_riscv_local_hfence_vvma_all(unsigned long vmid); > > -void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu); > - > void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu); > void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu); > void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu); > @@ -376,6 +374,7 @@ unsigned long kvm_riscv_gstage_vmid_bits(void); > int kvm_riscv_gstage_vmid_init(struct kvm *kvm); > bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid); > void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu); > +void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu); > > int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines); > > diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c > index 2f91ea5f8493..b3461bfd9756 100644 > --- a/arch/riscv/kvm/tlb.c > +++ b/arch/riscv/kvm/tlb.c > @@ -156,29 +156,6 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid) > csr_write(CSR_HGATP, hgatp); > } > > -void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu) > -{ > - unsigned long vmid; > - > 
- if (!kvm_riscv_gstage_vmid_bits() || > - vcpu->arch.last_exit_cpu == vcpu->cpu) > - return; > - > - /* > - * On RISC-V platforms with hardware VMID support, we share same > - * VMID for all VCPUs of a particular Guest/VM. This means we might > - * have stale G-stage TLB entries on the current Host CPU due to > - * some other VCPU of the same Guest which ran previously on the > - * current Host CPU. > - * > - * To cleanup stale TLB entries, we simply flush all G-stage TLB > - * entries by VMID whenever underlying Host CPU changes for a VCPU. > - */ > - > - vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid); > - kvm_riscv_local_hfence_gvma_vmid_all(vmid); > -} > - > void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu) > { > kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD); > diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c > index f98a1894d55b..cc7d00bcf345 100644 > --- a/arch/riscv/kvm/vcpu.c > +++ b/arch/riscv/kvm/vcpu.c > @@ -961,12 +961,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) > } > > /* > - * Cleanup stale TLB enteries > + * Sanitize VMID mappings cached (TLB) on current CPU > * > * Note: This should be done after G-stage VMID has been > * updated using kvm_riscv_gstage_vmid_ver_changed() > */ > - kvm_riscv_local_tlb_sanitize(vcpu); > + kvm_riscv_gstage_vmid_sanitize(vcpu); > > trace_kvm_entry(vcpu); > > diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c > index ddc98714ce8e..92c01255f86f 100644 > --- a/arch/riscv/kvm/vmid.c > +++ b/arch/riscv/kvm/vmid.c > @@ -122,3 +122,26 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu) > kvm_for_each_vcpu(i, v, vcpu->kvm) > kvm_make_request(KVM_REQ_UPDATE_HGATP, v); > } > + > +void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu) > +{ > + unsigned long vmid; > + > + if (!kvm_riscv_gstage_vmid_bits() || > + vcpu->arch.last_exit_cpu == vcpu->cpu) > + return; > + > + /* > + * On RISC-V platforms with hardware VMID support, we share same > + * VMID for all VCPUs of a 
particular Guest/VM. This means we might > + * have stale G-stage TLB entries on the current Host CPU due to > + * some other VCPU of the same Guest which ran previously on the > + * current Host CPU. > + * > + * To cleanup stale TLB entries, we simply flush all G-stage TLB > + * entries by VMID whenever underlying Host CPU changes for a VCPU. > + */ > + > + vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid); > + kvm_riscv_local_hfence_gvma_vmid_all(vmid); > +} Thanks. Reviewed-by: Nutty Liu <liujingqi@xxxxxxxxxxxxxxxxxxx>