Re: [PATCH 01/28] KVM: SVM: Don't BUG if setting up the MSR intercept bitmaps fails

On Thu, May 29, 2025 at 04:39:46PM -0700, Sean Christopherson wrote:
>WARN and reject module loading if there is a problem with KVM's MSR
>interception bitmaps.  Panicking the host in this situation is inexcusable
>since it is trivially easy to propagate the error up the stack.
>
>Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
>---
> arch/x86/kvm/svm/svm.c | 27 +++++++++++++++------------
> 1 file changed, 15 insertions(+), 12 deletions(-)
>
>diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>index 0ad1a6d4fb6d..bd75ff8e4f20 100644
>--- a/arch/x86/kvm/svm/svm.c
>+++ b/arch/x86/kvm/svm/svm.c
>@@ -945,7 +945,7 @@ static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
> 	}
> }
> 
>-static void add_msr_offset(u32 offset)
>+static int add_msr_offset(u32 offset)
> {
> 	int i;
> 
>@@ -953,7 +953,7 @@ static void add_msr_offset(u32 offset)
> 
> 		/* Offset already in list? */
> 		if (msrpm_offsets[i] == offset)
>-			return;
>+			return 0;
> 
> 		/* Slot used by another offset? */
> 		if (msrpm_offsets[i] != MSR_INVALID)
>@@ -962,17 +962,13 @@ static void add_msr_offset(u32 offset)
> 		/* Add offset to list */
> 		msrpm_offsets[i] = offset;
> 
>-		return;
>+		return 0;
> 	}
> 
>-	/*
>-	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
>-	 * increase MSRPM_OFFSETS in this case.
>-	 */
>-	BUG();
>+	return -EIO;

Would -ENOSPC be more appropriate here? And since init_msrpm_offsets() returns
-EIO regardless of the specific code, returning a bool instead of an int might
be cleaner; the error code isn't propagated upwards anyway.
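
Something along these lines (untested, just to sketch the bool variant, with
the WARN kept at the call site):

static bool add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return true;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;
		return true;
	}

	/* Table overflow; MSRPM_OFFSETS would need to grow. */
	return false;
}

and in init_msrpm_offsets():

		if (WARN_ON_ONCE(!add_msr_offset(offset)))
			return -EIO;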

> }
> 
>-static void init_msrpm_offsets(void)
>+static int init_msrpm_offsets(void)
> {
> 	int i;
> 
>@@ -982,10 +978,13 @@ static void init_msrpm_offsets(void)
> 		u32 offset;
> 
> 		offset = svm_msrpm_offset(direct_access_msrs[i].index);
>-		BUG_ON(offset == MSR_INVALID);
>+		if (WARN_ON(offset == MSR_INVALID))
>+			return -EIO;
> 
>-		add_msr_offset(offset);
>+		if (WARN_ON_ONCE(add_msr_offset(offset)))
>+			return -EIO;
> 	}
>+	return 0;
> }
> 
> void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
>@@ -5511,7 +5510,11 @@ static __init int svm_hardware_setup(void)
> 	memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
> 	iopm_base = __sme_page_pa(iopm_pages);
> 
>-	init_msrpm_offsets();
>+	r = init_msrpm_offsets();
>+	if (r) {
>+		__free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));

__free_pages(iopm_pages, order);

And we could move init_msrpm_offsets() above the allocation of iopm_pages, so
there is nothing to unwind if it fails. I don't have a strong opinion on this
though, as it goes beyond a simple change to the return type.
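
I.e. something like this (untested; the alloc_pages() call is written from
memory of the existing svm_hardware_setup(), so take it as a sketch):

	r = init_msrpm_offsets();
	if (r)
		return r;

	iopm_pages = alloc_pages(GFP_KERNEL, order);
	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
	iopm_base = __sme_page_pa(iopm_pages);

With that ordering the error path for init_msrpm_offsets() is just "return r;"
and nothing needs to be freed.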

>+		return r;
>+	}
> 
> 	kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
> 				     XFEATURE_MASK_BNDCSR);
>-- 
>2.49.0.1204.g71687c7c1d-goog
>