Re: [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run()

On Wed, Aug 20, 2025 at 8:10 AM Uros Bizjak <ubizjak@xxxxxxxxx> wrote:
>
> On Tue, Aug 19, 2025 at 8:56 PM Sean Christopherson <seanjc@xxxxxxxxxx> wrote:
> >
> > On Tue, Aug 19, 2025, Uros Bizjak wrote:
> > > > >   2d: 48 8b 7c 24 10          mov    0x10(%rsp),%rdi
> > > > >   32: 8b 87 48 18 00 00       mov    0x1848(%rdi),%eax
> > > > >   38: 65 3b 05 00 00 00 00    cmp    %gs:0x0(%rip),%eax
> > > > >   3f: 74 09                   je     4a <...>
> > > > >   41: b9 48 00 00 00          mov    $0x48,%ecx
> > > > >   46: 31 d2                   xor    %edx,%edx
> > > > >   48: 0f 30                   wrmsr
> > > > >
> > > > > No functional change intended.
> > > > >
> > > > > Signed-off-by: Uros Bizjak <ubizjak@xxxxxxxxx>
> > > > > Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
> > > > > Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> > > > > Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> > > > > Cc: Ingo Molnar <mingo@xxxxxxxxxx>
> > > > > Cc: Borislav Petkov <bp@xxxxxxxxx>
> > > > > Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
> > > > > Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
> > > > > ---
> > > > >  arch/x86/kvm/vmx/vmenter.S | 6 ++----
> > > > >  1 file changed, 2 insertions(+), 4 deletions(-)
> > > > >
> > > > > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > > > > index 0a6cf5bff2aa..c65de5de92ab 100644
> > > > > --- a/arch/x86/kvm/vmx/vmenter.S
> > > > > +++ b/arch/x86/kvm/vmx/vmenter.S
> > > > > @@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > > > >        * and vmentry.
> > > > >        */
> > > > >       mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
> > > > > -     movl VMX_spec_ctrl(%_ASM_DI), %edi
> > > > > -     movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
> > > > > -     cmp %edi, %esi
> > > > > +     movl VMX_spec_ctrl(%_ASM_DI), %eax
> > > > > +     cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
> > > >
> > > > Huh.  There's a pre-existing bug lurking here, and in the SVM code.  SPEC_CTRL
> > > > is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are always
> > > > zero.
> > >
> > > But the most significant bits are zero; the MSR is defined in arch/x86/include/asm/msr-index.h as:
> > >
> > > #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
> > >
> > > and "movl $..., %eax" zero-extends the value to full 64-bit width.
> > >
> > > FWIW, MSR_IA32_SPEC_CTRL is handled in the same way in arch/x86/entry/entry.S:
> > >
> > > movl $MSR_IA32_PRED_CMD, %ecx
> >
> > That's the MSR index, not the value.  I'm pointing out that:
> >
> >         movl VMX_spec_ctrl(%_ASM_DI), %edi              <== drops vmx->spec_ctrl[63:32]
> >         movl PER_CPU_VAR(x86_spec_ctrl_current), %esi   <== drops x86_spec_ctrl_current[63:32]
> >         cmp %edi, %esi                                  <== can get false negatives
> >         je .Lspec_ctrl_done
> >         mov $MSR_IA32_SPEC_CTRL, %ecx
> >         xor %edx, %edx                                  <== can clobber guest value
> >         mov %edi, %eax
> >         wrmsr
> >
> > The bug is _currently_ benign because neither KVM nor the kernel support setting
> > any of bits 63:32, but it's still a bug that needs to be fixed.
>
> Oh, I see it. Let me try to fix it in a new patch.
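
To make the failure mode concrete, here is a minimal userspace C sketch of
what the truncated compare amounts to (illustration only, not kernel code;
the bit-32 value is made up, since nothing above bit 31 is defined today):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical: no bits above 31 are defined in SPEC_CTRL today. */
	uint64_t guest_spec_ctrl = 1ULL << 32;
	uint64_t host_spec_ctrl  = 0;

	/* Only bits 31:0 survive, as in "cmp %edi, %esi" on 32-bit regs. */
	if ((uint32_t)guest_spec_ctrl == (uint32_t)host_spec_ctrl)
		printf("false match: the values differ, but the wrmsr is skipped\n");

	/*
	 * And when the wrmsr does happen, "xor %edx, %edx" means bits 63:32
	 * of the guest value are written as zero.
	 */
	return 0;
}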

The VMX patch is at [1]. The SVM patch is a bit more involved, because the
new 32-bit code needs to clobber one additional register. That patch is
attached to this message; I have compile-tested it, but I have no means of
runtime testing, so can you please put it through your torture tests?
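
For reference, the comparison the new 32-bit path performs is equivalent to
this C sketch (the helper name is illustrative, not taken from the kernel):

#include <stdbool.h>
#include <stdint.h>

bool spec_ctrl_differs(uint64_t guest, uint64_t host)
{
	/*
	 * Without 64-bit registers the compare is done as two 32-bit XORs
	 * whose results are ORed together; holding the second XOR result
	 * is what costs the extra scratch register in the asm version.
	 */
	uint32_t lo = (uint32_t)guest ^ (uint32_t)host;
	uint32_t hi = (uint32_t)(guest >> 32) ^ (uint32_t)(host >> 32);

	return (lo | hi) != 0;
}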

[1] https://lore.kernel.org/lkml/20250820100007.356761-1-ubizjak@xxxxxxxxx/

Uros.
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 235c4af6b692..a1b9f2ac713c 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -52,11 +52,23 @@
 	 * there must not be any returns or indirect branches between this code
 	 * and vmentry.
 	 */
-	movl SVM_spec_ctrl(%_ASM_DI), %eax
-	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
+#ifdef CONFIG_X86_64
+	mov SVM_spec_ctrl(%rdi), %rdx
+	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	je 801b
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov SVM_spec_ctrl(%edi), %eax
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+	xor %eax, %ecx
+	mov SVM_spec_ctrl + 4(%edi), %edx
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
+	xor %edx, %esi
+	or %esi, %ecx
 	je 801b
+#endif
 	mov $MSR_IA32_SPEC_CTRL, %ecx
-	xor %edx, %edx
 	wrmsr
 	jmp 801b
 .endm
@@ -80,14 +92,31 @@
 	cmpb $0, \spec_ctrl_intercepted
 	jnz 998f
 	rdmsr
-	movl %eax, SVM_spec_ctrl(%_ASM_DI)
+#ifdef CONFIG_X86_64
+	shl $32, %rdx
+	or %rax, %rdx
+	mov %rdx, SVM_spec_ctrl(%rdi)
 998:
-
 	/* Now restore the host value of the MSR if different from the guest's.  */
-	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
-	cmp SVM_spec_ctrl(%_ASM_DI), %eax
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	cmp SVM_spec_ctrl(%rdi), %rdx
 	je 901b
-	xor %edx, %edx
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov %eax, SVM_spec_ctrl(%edi)
+	mov %edx, SVM_spec_ctrl + 4(%edi)
+998:
+	/* Now restore the host value of the MSR if different from the guest's.  */
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
+	mov SVM_spec_ctrl(%edi), %esi
+	xor %eax, %esi
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
+	mov SVM_spec_ctrl + 4(%edi), %edi
+	xor %edx, %edi
+	or %edi, %esi
+	je 901b
+#endif
 	wrmsr
 	jmp 901b
 .endm
