The dirty page ring is read by mmap()-ing the vCPU file descriptor,
which is only possible for plane 0. This is not a problem, because the
ring is only filled by KVM_RUN, which takes the plane-0 vCPU mutex; it
is therefore possible to share the ring among vCPUs that have the same
id but are on different planes. (TODO: double check)

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 include/linux/kvm_host.h | 6 ++++--
 virt/kvm/dirty_ring.c    | 5 +++--
 virt/kvm/kvm_main.c      | 10 +++++-----
 3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d2e0c0e8ff17..b511aed2de8e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -394,9 +394,11 @@ struct kvm_vcpu {
 	bool scheduled_out;
 	struct kvm_vcpu_arch arch;
 	struct kvm_vcpu_stat *stat;
-	struct kvm_vcpu_stat __stat;
 	char stats_id[KVM_STATS_NAME_SIZE];
-	struct kvm_dirty_ring dirty_ring;
+	struct kvm_dirty_ring *dirty_ring;
+
+	struct kvm_vcpu_stat __stat;
+	struct kvm_dirty_ring __dirty_ring;
 
 	/*
 	 * The most recently used memslot by this vCPU and the slots generation
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index d14ffc7513ee..66e6a6a67d13 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -172,11 +172,12 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
 
 void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
 {
-	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
+	struct kvm_dirty_ring *ring = vcpu->dirty_ring;
 	struct kvm_dirty_gfn *entry;
 
 	/* It should never get full */
 	WARN_ON_ONCE(kvm_dirty_ring_full(ring));
+	lockdep_assert_held(&vcpu->plane0->mutex);
 
 	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];
 
@@ -204,7 +205,7 @@ bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
 	 * the dirty ring is reset by userspace.
 	 */
 	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
-	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
+	    kvm_dirty_ring_soft_full(vcpu->dirty_ring)) {
 		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
 		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
 		trace_kvm_dirty_ring_exit(vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4c7e379fbf7d..863fd80ddfbe 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -466,7 +466,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	kvm_arch_vcpu_destroy(vcpu);
-	kvm_dirty_ring_free(&vcpu->dirty_ring);
+	kvm_dirty_ring_free(vcpu->dirty_ring);
 
 	/*
 	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
@@ -4038,7 +4038,7 @@ static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
 #endif
 	else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
 		page = kvm_dirty_ring_get_page(
-		    &vcpu->dirty_ring,
+		    vcpu->dirty_ring,
 		    vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
 	else
 		return kvm_arch_vcpu_fault(vcpu, vmf);
@@ -4174,7 +4174,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 	vcpu->run = page_address(page);
 
 	if (kvm->dirty_ring_size) {
-		r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring,
+		r = kvm_dirty_ring_alloc(kvm, &vcpu->__dirty_ring,
 					 id, kvm->dirty_ring_size);
 		if (r)
 			goto vcpu_free_run_page;
@@ -4242,7 +4242,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_destroy(vcpu);
 vcpu_free_dirty_ring:
-	kvm_dirty_ring_free(&vcpu->dirty_ring);
+	kvm_dirty_ring_free(&vcpu->__dirty_ring);
 vcpu_free_run_page:
 	free_page((unsigned long)vcpu->run);
 vcpu_free:
@@ -5047,7 +5047,7 @@ static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
 	mutex_lock(&kvm->slots_lock);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
+		cleared += kvm_dirty_ring_reset(vcpu->kvm, vcpu->dirty_ring);
 
 	mutex_unlock(&kvm->slots_lock);
 
-- 
2.49.0
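
[Editor's note: the hunks above allocate &vcpu->__dirty_ring and assert the
plane-0 mutex in kvm_dirty_ring_push(), but do not show where the new
vcpu->dirty_ring pointer is set. A minimal sketch of how it could be wired
up at vCPU creation follows; kvm_vcpu_wire_dirty_ring() is a hypothetical
helper, and vcpu->plane0 is assumed (based on the lockdep assertion in the
diff) to point at the same-id vCPU on plane 0, including for plane-0 vCPUs
themselves.]

/*
 * Sketch only, not part of the patch: the plane-0 vCPU owns the ring
 * storage in __dirty_ring; vCPUs with the same id on other planes
 * reuse it through the dirty_ring pointer.  This matches the commit
 * message: KVM_RUN fills the ring only under the plane-0 vCPU mutex,
 * which is exactly what kvm_dirty_ring_push() now asserts.
 */
static void kvm_vcpu_wire_dirty_ring(struct kvm_vcpu *vcpu)
{
	/* Assumes vcpu->plane0 == vcpu when vcpu itself is on plane 0. */
	vcpu->dirty_ring = &vcpu->plane0->__dirty_ring;
}

Under that invariant, kvm_vcpu_fault() keeps serving mmap() of the ring
through the plane-0 file descriptor only, while a push from any plane lands
in the one shared ring.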