From: Ackerley Tng <ackerleytng@xxxxxxxxxx>

For memslots backed by guest_memfd with shared mem support, the KVM MMU
always faults in pages from guest_memfd, and not from the
userspace_addr.

Towards this end, this patch also introduces a new guest_memfd flag,
GUEST_MEMFD_FLAG_SUPPORT_SHARED, which indicates that the guest_memfd
instance supports in-place shared memory.

This flag is only supported if the VM creating the guest_memfd instance
belongs to certain types determined by architecture. Only non-CoCo VMs
are permitted to use guest_memfd with shared mem, for now.

Function names have also been updated for accuracy -
kvm_mem_is_private() returns true only when the current private/shared
state (in the CoCo sense) of the memory is private, and returns false
if the current state is shared explicitly or implicitly, e.g., because
it belongs to a non-CoCo VM.

kvm_mmu_faultin_pfn_gmem() is updated to indicate that it can be used
to fault in not just private memory, but more generally, from
guest_memfd.

Co-developed-by: Fuad Tabba <tabba@xxxxxxxxxx>
Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
Co-developed-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Ackerley Tng <ackerleytng@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c   | 33 ++++++++++++++++++---------------
 include/linux/kvm_host.h | 33 +++++++++++++++++++++++++++++++--
 virt/kvm/guest_memfd.c   | 17 +++++++++++++++++
 3 files changed, 66 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2b6376986f96..cfbb471f7c70 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4465,21 +4465,25 @@ static inline u8 kvm_max_level_for_order(int order)
 	return PG_LEVEL_4K;
 }
 
-static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
-					u8 max_level, int gmem_order)
+static u8 kvm_max_level_for_fault_and_order(struct kvm *kvm,
+					    struct kvm_page_fault *fault,
+					    int order)
 {
-	u8 req_max_level;
+	u8 max_level = fault->max_level;
 
 	if (max_level == PG_LEVEL_4K)
 		return PG_LEVEL_4K;
 
-	max_level = min(kvm_max_level_for_order(gmem_order), max_level);
+	max_level = min(kvm_max_level_for_order(order), max_level);
 	if (max_level == PG_LEVEL_4K)
 		return PG_LEVEL_4K;
 
-	req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
-	if (req_max_level)
-		max_level = min(max_level, req_max_level);
+	if (fault->is_private) {
+		u8 level = kvm_x86_call(private_max_mapping_level)(kvm, fault->pfn);
+
+		if (level)
+			max_level = min(max_level, level);
+	}
 
 	return max_level;
 }
@@ -4491,10 +4495,10 @@ static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
 				      r == RET_PF_RETRY, fault->map_writable);
 }
 
-static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
-				       struct kvm_page_fault *fault)
+static int kvm_mmu_faultin_pfn_gmem(struct kvm_vcpu *vcpu,
+				    struct kvm_page_fault *fault)
 {
-	int max_order, r;
+	int gmem_order, r;
 
 	if (!kvm_slot_has_gmem(fault->slot)) {
 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
@@ -4502,15 +4506,14 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
 	}
 
 	r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
-			     &fault->refcounted_page, &max_order);
+			     &fault->refcounted_page, &gmem_order);
 	if (r) {
 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
 		return r;
 	}
 
 	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
-	fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn,
-							 fault->max_level, max_order);
+	fault->max_level = kvm_max_level_for_fault_and_order(vcpu->kvm, fault, gmem_order);
 
 	return RET_PF_CONTINUE;
 }
@@ -4520,8 +4523,8 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
 {
 	unsigned int foll = fault->write ? FOLL_WRITE : 0;
 
-	if (fault->is_private)
-		return kvm_mmu_faultin_pfn_private(vcpu, fault);
+	if (fault->is_private || kvm_gmem_memslot_supports_shared(fault->slot))
+		return kvm_mmu_faultin_pfn_gmem(vcpu, fault);
 
 	foll |= FOLL_NOWAIT;
 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2ec89c214978..de7b46ee1762 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2502,6 +2502,15 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
 		vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
 }
 
+#ifdef CONFIG_KVM_GMEM_SHARED_MEM
+bool kvm_gmem_memslot_supports_shared(const struct kvm_memory_slot *slot);
+#else
+static inline bool kvm_gmem_memslot_supports_shared(const struct kvm_memory_slot *slot)
+{
+	return false;
+}
+#endif /* CONFIG_KVM_GMEM_SHARED_MEM */
+
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
 static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
 {
@@ -2515,10 +2524,30 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
 					 struct kvm_gfn_range *range);
 
+/*
+ * Returns true if the given gfn's private/shared status (in the CoCo sense) is
+ * private.
+ *
+ * A return value of false indicates that the gfn is explicitly or implicitly
+ * shared (i.e., non-CoCo VMs).
+ */
 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
 {
-	return IS_ENABLED(CONFIG_KVM_GMEM) &&
-	       kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+	struct kvm_memory_slot *slot;
+
+	if (!IS_ENABLED(CONFIG_KVM_GMEM))
+		return false;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	if (kvm_slot_has_gmem(slot) && kvm_gmem_memslot_supports_shared(slot)) {
+		/*
+		 * For now, memslots only support in-place shared memory if the
+		 * host is allowed to mmap memory (i.e., non-CoCo VMs).
+		 */
+		return false;
+	}
+
+	return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
 }
 #else
 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 2f499021df66..fe0245335c96 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -388,6 +388,23 @@ static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
 
 	return 0;
 }
+
+bool kvm_gmem_memslot_supports_shared(const struct kvm_memory_slot *slot)
+{
+	struct file *file;
+	bool ret;
+
+	file = kvm_gmem_get_file((struct kvm_memory_slot *)slot);
+	if (!file)
+		return false;
+
+	ret = kvm_gmem_supports_shared(file_inode(file));
+
+	fput(file);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_gmem_memslot_supports_shared);
+
 #else
 #define kvm_gmem_mmap NULL
 #endif /* CONFIG_KVM_GMEM_SHARED_MEM */
-- 
2.49.0.1045.g170613ef41-goog
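
As an illustrative aside (not part of the patch, and written under stated
assumptions): a userspace VMM would opt into in-place shared memory by passing
GUEST_MEMFD_FLAG_SUPPORT_SHARED to KVM_CREATE_GUEST_MEMFD and then mmap()ing
the returned fd. The flag's numeric value below is a placeholder, since the
uapi definition lands elsewhere in this series; the 2M size and error handling
are for illustration only.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Placeholder value; the real definition comes from this series' uapi change. */
#ifndef GUEST_MEMFD_FLAG_SUPPORT_SHARED
#define GUEST_MEMFD_FLAG_SUPPORT_SHARED (1ULL << 0)
#endif

int main(void)
{
	struct kvm_create_guest_memfd gmem = {
		.size = 2UL * 1024 * 1024,	/* one 2M region, for illustration */
		.flags = GUEST_MEMFD_FLAG_SUPPORT_SHARED,
	};
	int kvm_fd, vm_fd, gmem_fd;
	void *mem;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open(/dev/kvm)");
		return 1;
	}

	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
	if (gmem_fd < 0) {
		perror("KVM_CREATE_GUEST_MEMFD");
		return 1;
	}

	/* With shared-mem support, the host is allowed to mmap() the fd. */
	mem = mmap(NULL, gmem.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   gmem_fd, 0);
	if (mem == MAP_FAILED) {
		perror("mmap(guest_memfd)");
		return 1;
	}

	printf("guest_memfd mapped for shared access at %p\n", mem);
	return 0;
}

The ioctl sequence mirrors the existing KVM_CREATE_GUEST_MEMFD flow; what this
series adds is that, for VM types permitted by the architecture (non-CoCo VMs
for now), the mmap() on the returned fd is expected to succeed.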