Store KVM's MSRPM pointers as "void *" instead of "u32 *" to guard against
directly accessing the bitmaps outside of code that is explicitly written
to access the bitmaps with a specific type.

Opportunistically use svm_vcpu_free_msrpm() in svm_vcpu_free() instead of
open coding an equivalent.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c |  4 +++-
 arch/x86/kvm/svm/svm.c    |  8 ++++----
 arch/x86/kvm/svm/svm.h    | 13 ++++++++-----
 3 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 5d6525627681..e07e10fb52a5 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -271,6 +271,8 @@ int __init nested_svm_init_msrpm_merge_offsets(void)
 static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 *msrpm02 = svm->nested.msrpm;
+	u32 *msrpm01 = svm->msrpm;
 	int i;
 
 	/*
@@ -305,7 +307,7 @@ static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
 		if (kvm_vcpu_read_guest(vcpu, offset, &value, 4))
 			return false;
 
-		svm->nested.msrpm[p] = svm->msrpm[p] | value;
+		msrpm02[p] = msrpm01[p] | value;
 	}
 
 	svm->nested.force_msr_bitmap_recalc = false;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fa2df1c869db..6f99031c2926 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -731,11 +731,11 @@ void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	svm->nested.force_msr_bitmap_recalc = true;
 }
 
-u32 *svm_vcpu_alloc_msrpm(void)
+void *svm_vcpu_alloc_msrpm(void)
 {
 	unsigned int order = get_order(MSRPM_SIZE);
 	struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
-	u32 *msrpm;
+	void *msrpm;
 
 	if (!pages)
 		return NULL;
@@ -809,7 +809,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
 	svm->x2avic_msrs_intercepted = intercept;
 }
 
-void svm_vcpu_free_msrpm(u32 *msrpm)
+void svm_vcpu_free_msrpm(void *msrpm)
 {
 	__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
 }
@@ -1353,7 +1353,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
 	sev_free_vcpu(vcpu);
 
 	__free_page(__sme_pa_to_page(svm->vmcb01.pa));
-	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
+	svm_vcpu_free_msrpm(svm->msrpm);
 }
 
 #ifdef CONFIG_CPU_MITIGATIONS
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index d146c35b9bd2..77287c870967 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -189,8 +189,11 @@ struct svm_nested_state {
 	u64 vmcb12_gpa;
 	u64 last_vmcb12_gpa;
 
-	/* These are the merged vectors */
-	u32 *msrpm;
+	/*
+	 * The MSR permissions map used for vmcb02, which is the merge result
+	 * of vmcb01 and vmcb12
+	 */
+	void *msrpm;
 
 	/* A VMRUN has started but has not yet been performed, so
 	 * we cannot inject a nested vmexit yet. */
@@ -271,7 +274,7 @@ struct vcpu_svm {
 	 */
 	u64 virt_spec_ctrl;
 
-	u32 *msrpm;
+	void *msrpm;
 
 	ulong nmi_iret_rip;
 
@@ -673,8 +676,8 @@ BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
 /* svm.c */
 extern bool dump_invalid_vmcb;
 
-u32 *svm_vcpu_alloc_msrpm(void);
-void svm_vcpu_free_msrpm(u32 *msrpm);
+void *svm_vcpu_alloc_msrpm(void);
+void svm_vcpu_free_msrpm(void *msrpm);
 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
 void svm_enable_lbrv(struct kvm_vcpu *vcpu);
 void svm_update_lbrv(struct kvm_vcpu *vcpu);
-- 
2.49.0.1204.g71687c7c1d-goog
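
For readers outside the KVM tree, a minimal standalone sketch of the pattern the
patch enforces: the owning struct keeps the bitmap as an opaque "void *", and only
code that genuinely needs to walk the bitmap at a given granularity takes a typed
local, as nested_svm_merge_msrpm() now does. The struct, helper names, and size
below are illustrative only, not KVM's.

#include <stdint.h>
#include <stdlib.h>

#define DEMO_MSRPM_SIZE 8192	/* illustrative size, not KVM's MSRPM layout */

/* Owner stores the bitmap opaquely; nothing can index it by accident. */
struct vcpu_demo {
	void *msrpm;
};

static void *demo_alloc_msrpm(void)
{
	return calloc(1, DEMO_MSRPM_SIZE);
}

static void demo_free_msrpm(void *msrpm)
{
	free(msrpm);
}

/* Only code that explicitly wants u32 granularity casts to a typed local. */
static void demo_merge_msrpm(struct vcpu_demo *dst, const struct vcpu_demo *src)
{
	uint32_t *d = dst->msrpm;
	const uint32_t *s = src->msrpm;
	size_t i;

	for (i = 0; i < DEMO_MSRPM_SIZE / sizeof(uint32_t); i++)
		d[i] |= s[i];
}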