On Thu, Aug 14, 2025, Kai Huang wrote:
>  arch/x86/include/asm/tdx.h  |  2 ++
>  arch/x86/kvm/vmx/tdx.c      | 12 ++++++++++++
>  arch/x86/virt/vmx/tdx/tdx.c | 12 ++++++++++++
>  3 files changed, 26 insertions(+)
> 
> diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
> index 0922265c6bdc..e9a213582f03 100644
> --- a/arch/x86/include/asm/tdx.h
> +++ b/arch/x86/include/asm/tdx.h
> @@ -217,6 +217,7 @@ u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u6
>  u64 tdh_phymem_cache_wb(bool resume);
>  u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
>  u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
> +void tdx_cpu_flush_cache(void);
>  #else
>  static inline void tdx_init(void) { }
>  static inline int tdx_cpu_enable(void) { return -ENODEV; }
> @@ -224,6 +225,7 @@ static inline int tdx_enable(void) { return -ENODEV; }
>  static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
>  static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
>  static inline const struct tdx_sys_info *tdx_get_sysinfo(void) { return NULL; }
> +static inline void tdx_cpu_flush_cache(void) { }

Stub is unnecessary.  tdx.c is built iff KVM_INTEL_TDX=y, and that depends on
INTEL_TDX_HOST.

At a glance, some of the existing stubs are useless as well.

>  #endif /* CONFIG_INTEL_TDX_HOST */
> 
>  #endif /* !__ASSEMBLER__ */
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 66744f5768c8..1bc6f52e0cd7 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -442,6 +442,18 @@ void tdx_disable_virtualization_cpu(void)
>  		tdx_flush_vp(&arg);
>  	}
>  	local_irq_restore(flags);
> +
> +	/*
> +	 * No more TDX activity on this CPU from here. Flush cache to
> +	 * avoid having to do WBINVD in stop_this_cpu() during kexec.
> +	 *
> +	 * Kexec calls native_stop_other_cpus() to stop remote CPUs
> +	 * before booting to new kernel, but that code has a "race"
> +	 * when the normal REBOOT IPI times out and NMIs are sent to
> +	 * remote CPUs to stop them. Doing WBINVD in stop_this_cpu()
> +	 * could potentially increase the possibility of the "race".
> +	 */
> +	tdx_cpu_flush_cache();

IIUC, this can be:

	if (IS_ENABLED(CONFIG_KEXEC))
		tdx_cpu_flush_cache();

>  }
> 
>  #define TDX_SEAMCALL_RETRIES	10000
> 
> diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
> index 3ea6f587c81a..c26e2e07ff6b 100644
> --- a/arch/x86/virt/vmx/tdx/tdx.c
> +++ b/arch/x86/virt/vmx/tdx/tdx.c
> @@ -1870,3 +1870,15 @@ u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
>  	return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
>  }
>  EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
> +
> +void tdx_cpu_flush_cache(void)
> +{
> +	lockdep_assert_preemption_disabled();
> +
> +	if (!this_cpu_read(cache_state_incoherent))
> +		return;
> +
> +	wbinvd();
> +	this_cpu_write(cache_state_incoherent, false);
> +}
> +EXPORT_SYMBOL_GPL(tdx_cpu_flush_cache);
> -- 
> 2.50.1
> 
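
For illustration only, a minimal sketch of how the tail of
tdx_disable_virtualization_cpu() could look with the suggested
IS_ENABLED(CONFIG_KEXEC) guard folded in; the trimmed comment wording below is
an assumption, not part of the patch under review:

	local_irq_restore(flags);

	/*
	 * No more TDX activity on this CPU from here.  Flush the cache
	 * so the kexec path doesn't have to do WBINVD in stop_this_cpu().
	 */
	if (IS_ENABLED(CONFIG_KEXEC))
		tdx_cpu_flush_cache();

Because IS_ENABLED() evaluates to a compile-time constant, CONFIG_KEXEC=n
builds compile the branch away with no #ifdef; the declaration in
<asm/tdx.h> is all the build needs.  That is consistent with dropping the
!CONFIG_INTEL_TDX_HOST stub, since tdx.c is only built when KVM_INTEL_TDX=y,
which in turn depends on INTEL_TDX_HOST.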