Re: [PATCH 11/28] KVM: SVM: Add helpers for accessing MSR bitmap that don't rely on offsets

On 5/30/25 01:39, Sean Christopherson wrote:
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 47a36a9a7fe5..e432cd7a7889 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -628,6 +628,50 @@ static_assert(SVM_MSRS_PER_RANGE == 8192);
  #define SVM_MSRPM_RANGE_1_BASE_MSR	0xc0000000
  #define SVM_MSRPM_RANGE_2_BASE_MSR	0xc0010000
+#define SVM_MSRPM_FIRST_MSR(range_nr) \
+	(SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR)
+#define SVM_MSRPM_LAST_MSR(range_nr)	\
+	(SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR + SVM_MSRS_PER_RANGE - 1)
+
+#define SVM_MSRPM_BIT_NR(range_nr, msr)						\
+	(range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +			\
+	 (msr - SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR) * SVM_BITS_PER_MSR)
+
+#define SVM_MSRPM_SANITY_CHECK_BITS(range_nr)					\
+static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 1) ==	\
+	      range_nr * 2048 * 8 + 2);						\
+static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 7) ==	\
+	      range_nr * 2048 * 8 + 14);
+
+SVM_MSRPM_SANITY_CHECK_BITS(0);
+SVM_MSRPM_SANITY_CHECK_BITS(1);
+SVM_MSRPM_SANITY_CHECK_BITS(2);

Replying here for patches 11, 25, and 26. None of this is needed; just write a function like this:

static inline u32 svm_msr_bit(u32 msr)
{
	u32 msr_base = msr & ~(SVM_MSRS_PER_RANGE - 1);

	if (msr_base == SVM_MSRPM_RANGE_0_BASE_MSR)
		return SVM_MSRPM_BIT_NR(0, msr);
	if (msr_base == SVM_MSRPM_RANGE_1_BASE_MSR)
		return SVM_MSRPM_BIT_NR(1, msr);
	if (msr_base == SVM_MSRPM_RANGE_2_BASE_MSR)
		return SVM_MSRPM_BIT_NR(2, msr);
	return MSR_INVALID;
}
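
This works because each range's base MSR is aligned to SVM_MSRS_PER_RANGE
(all three bases are multiples of 8192), so masking off the low bits
recovers the base.  If you want to make that assumption explicit, a couple
of asserts would do (just a sketch, not strictly needed):

/* The masking above relies on the base MSRs being range-aligned. */
static_assert(!(SVM_MSRPM_RANGE_0_BASE_MSR & (SVM_MSRS_PER_RANGE - 1)));
static_assert(!(SVM_MSRPM_RANGE_1_BASE_MSR & (SVM_MSRS_PER_RANGE - 1)));
static_assert(!(SVM_MSRPM_RANGE_2_BASE_MSR & (SVM_MSRS_PER_RANGE - 1)));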

With that, you can throw away most of the other macros.  For example:

+#define SVM_BUILD_MSR_BITMAP_CASE(bitmap, range_nr, msr, bitop, bit_rw)		\
+	case SVM_MSRPM_FIRST_MSR(range_nr) ... SVM_MSRPM_LAST_MSR(range_nr):	\
+		return bitop##_bit(SVM_MSRPM_BIT_NR(range_nr, msr) + bit_rw, bitmap);

... becomes a lot more lowercase:

static inline rtype svm_##action##_msr_bitmap_##access(
	unsigned long *bitmap, u32 msr)
{
	u32 bit = svm_msr_bit(msr);

	if (bit == MSR_INVALID)
		return true;
	return bitop##_bit(bit + bit_rw, bitmap);
}
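
For example, the test/read instantiation would expand to roughly this
(hypothetical expansion, for illustration only):

static inline bool svm_test_msr_bitmap_read(unsigned long *bitmap, u32 msr)
{
	u32 bit = svm_msr_bit(msr);

	if (bit == MSR_INVALID)
		return true;
	return test_bit(bit, bitmap);	/* bit_rw == 0 for reads */
}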


In patch 25, likewise, you just get:

static u32 svm_msrpm_offset(u32 msr)
{
	u32 bit = svm_msr_bit(msr);

	if (bit == MSR_INVALID)
		return MSR_INVALID;
	return bit / BITS_PER_BYTE;
}

And you change everything to -EINVAL in patch 26 to kill MSR_INVALID.
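
For illustration only, if svm_msr_bit() also grows a signed return, the end
state could look something like this (one possible shape, not the only one):

static int svm_msrpm_offset(u32 msr)
{
	int bit = svm_msr_bit(msr);	/* hypothetical signed variant, -EINVAL on failure */

	if (bit < 0)
		return bit;
	return bit / BITS_PER_BYTE;
}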

Another nit...

+#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
+	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0)	\
+	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)
+
+BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
+BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
+BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)

Yes, it's a bit of duplication, but there's no need for the nesting; just do:

BUILD_SVM_MSR_BITMAP_HELPERS(bool, test,  test,    read,  0)
BUILD_SVM_MSR_BITMAP_HELPERS(bool, test,  test,    write, 1)
BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear, read,  0)
BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear, write, 1)
BUILD_SVM_MSR_BITMAP_HELPERS(void, set,   __set,   read,  0)
BUILD_SVM_MSR_BITMAP_HELPERS(void, set,   __set,   write, 1)
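
That is, collapse the two levels into a single macro; a sketch reusing the
body from above (parameter names assumed):

#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop, access, bit_rw)	\
static inline ret_type svm_##action##_msr_bitmap_##access(			\
	unsigned long *bitmap, u32 msr)						\
{										\
	u32 bit = svm_msr_bit(msr);						\
										\
	if (bit == MSR_INVALID)							\
		return (ret_type)true;						\
	return bitop##_bit(bit + bit_rw, bitmap);				\
}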

Otherwise, really nice.

Paolo