Add some of the data needed to move from one plane to the other within
a VM, typically from plane N to plane 0.

There is quite some difference between the two levels: separate planes
provide very little of the VM file descriptor functionality, whereas at
the vCPU level they are almost fully functional vCPUs, except that
non-zero planes(*) can only be run indirectly through the initial
plane.  Therefore, vCPUs use struct kvm_vcpu for all planes, with just
a couple of fields, to be added later, that will only be valid for
plane 0.  At the VM level, instead, plane info is stored in a
completely different struct.  For now struct kvm_plane has no
architecture-specific counterpart, but this may change in the future if
needed.  It's possible, for example, that some MMU info will become
per-plane in order to support per-plane RWX permissions.

(*) I will refrain from calling them astral planes.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 include/linux/kvm_host.h  | 17 ++++++++++++++++-
 include/linux/kvm_types.h |  1 +
 virt/kvm/kvm_main.c       | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c8f1facdb600..0e16c34080ef 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -84,6 +84,10 @@
 #define KVM_MAX_NR_ADDRESS_SPACES 1
 #endif
 
+#ifndef KVM_MAX_VCPU_PLANES
+#define KVM_MAX_VCPU_PLANES 1
+#endif
+
 /*
  * For the normal pfn, the highest 12 bits should be zero,
  * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
@@ -332,7 +336,8 @@ struct kvm_vcpu {
 #ifdef CONFIG_PROVE_RCU
 	int srcu_depth;
 #endif
-	int mode;
+	short plane;
+	short mode;
 	u64 requests;
 	unsigned long guest_debug;
 
@@ -367,6 +372,8 @@ struct kvm_vcpu {
 	} async_pf;
 #endif
 
+	struct kvm_vcpu *plane0;
+
 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 	/*
 	 * Cpu relax intercept or pause loop exit optimization
@@ -753,6 +760,11 @@ struct kvm_memslots {
 	int node_idx;
 };
 
+struct kvm_plane {
+	struct kvm *kvm;
+	int plane;
+};
+
 struct kvm {
 #ifdef KVM_HAVE_MMU_RWLOCK
 	rwlock_t mmu_lock;
@@ -777,6 +789,9 @@ struct kvm {
 	/* The current active memslot set for each address space */
 	struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
 	struct xarray vcpu_array;
+
+	struct kvm_plane *planes[KVM_MAX_VCPU_PLANES];
+
 	/*
 	 * Protected by slots_lock, but can be read outside if an
 	 * incorrect answer is acceptable.
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 827ecc0b7e10..7d0a86108d1a 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -11,6 +11,7 @@ struct kvm_interrupt;
 struct kvm_irq_routing_table;
 struct kvm_memory_slot;
 struct kvm_one_reg;
+struct kvm_plane;
 struct kvm_run;
 struct kvm_userspace_memory_region;
 struct kvm_vcpu;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f6c947961b78..67773b6b9576 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1095,9 +1095,22 @@ void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
 {
 }
 
+static struct kvm_plane *kvm_create_vm_plane(struct kvm *kvm, unsigned plane_id)
+{
+	struct kvm_plane *plane = kzalloc(sizeof(struct kvm_plane), GFP_KERNEL_ACCOUNT);
+
+	if (!plane)
+		return ERR_PTR(-ENOMEM);
+
+	plane->kvm = kvm;
+	plane->plane = plane_id;
+	return plane;
+}
+
 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
 {
 	struct kvm *kvm = kvm_arch_alloc_vm();
+	struct kvm_plane *plane0;
 	struct kvm_memslots *slots;
 	int r, i, j;
 
@@ -1136,6 +1149,13 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
 	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
 			task_pid_nr(current));
 
+	plane0 = kvm_create_vm_plane(kvm, 0);
+	if (IS_ERR(plane0)) {
+		r = PTR_ERR(plane0);
+		goto out_err_no_plane0;
+	}
+	kvm->planes[0] = plane0;
+
 	r = -ENOMEM;
 	if (init_srcu_struct(&kvm->srcu))
 		goto out_err_no_srcu;
@@ -1227,6 +1247,8 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
 out_err_no_irq_srcu:
 	cleanup_srcu_struct(&kvm->srcu);
 out_err_no_srcu:
+	kfree(kvm->planes[0]);
+out_err_no_plane0:
 	kvm_arch_free_vm(kvm);
 	mmdrop(current->mm);
 	return ERR_PTR(r);
@@ -1253,6 +1275,10 @@ static void kvm_destroy_devices(struct kvm *kvm)
 	}
 }
 
+static void kvm_destroy_plane(struct kvm_plane *plane)
+{
+}
+
 static void kvm_destroy_vm(struct kvm *kvm)
 {
 	int i;
@@ -1309,6 +1335,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
 	xa_destroy(&kvm->mem_attr_array);
 #endif
+	for (i = 0; i < ARRAY_SIZE(kvm->planes); i++) {
+		struct kvm_plane *plane = kvm->planes[i];
+		if (plane)
+			kvm_destroy_plane(plane);
+	}
 	kvm_arch_free_vm(kvm);
 	preempt_notifier_dec();
 	kvm_disable_virtualization();
@@ -4110,6 +4141,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 	}
 	vcpu->run = page_address(page);
 
+	vcpu->plane0 = vcpu;
 	kvm_vcpu_init(vcpu, kvm, id);
 
 	r = kvm_arch_vcpu_create(vcpu);
-- 
2.49.0
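
As a side note for reviewers, here is a minimal sketch, not part of the
patch, of how the new fields are intended to be navigated; the helper
names kvm_vcpu_plane() and kvm_vcpu_is_plane0() are hypothetical and
only assume the declarations added above plus the pre-existing
vcpu->kvm back-pointer:

/*
 * Hypothetical helpers, for illustration only: they rely solely on the
 * fields introduced by this patch (vcpu->plane, vcpu->plane0 and
 * kvm->planes[]) plus the existing vcpu->kvm back-pointer.
 */
static inline struct kvm_plane *kvm_vcpu_plane(struct kvm_vcpu *vcpu)
{
	/* vcpu->plane indexes the per-VM planes array added to struct kvm. */
	return vcpu->kvm->planes[vcpu->plane];
}

static inline bool kvm_vcpu_is_plane0(struct kvm_vcpu *vcpu)
{
	/* Plane-0 vCPUs point back to themselves through vcpu->plane0. */
	return vcpu->plane0 == vcpu;
}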