Re: [PATCH v11 18/18] KVM: selftests: guest_memfd mmap() test when mapping is allowed

Hi James,

On Thu, 5 Jun 2025 at 23:07, James Houghton <jthoughton@xxxxxxxxxx> wrote:
>
> On Thu, Jun 5, 2025 at 8:38 AM Fuad Tabba <tabba@xxxxxxxxxx> wrote:
> >
> > Expand the guest_memfd selftests to include testing mapping guest
> > memory for VM types that support it.
> >
> > Co-developed-by: Ackerley Tng <ackerleytng@xxxxxxxxxx>
> > Signed-off-by: Ackerley Tng <ackerleytng@xxxxxxxxxx>
> > Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
>
> Feel free to add:
>
> Reviewed-by: James Houghton <jthoughton@xxxxxxxxxx>

Thanks!

> > ---
> >  .../testing/selftests/kvm/guest_memfd_test.c  | 201 ++++++++++++++++--
> >  1 file changed, 180 insertions(+), 21 deletions(-)
> >
> > diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
> > index 341ba616cf55..1612d3adcd0d 100644
> > --- a/tools/testing/selftests/kvm/guest_memfd_test.c
> > +++ b/tools/testing/selftests/kvm/guest_memfd_test.c
> > @@ -13,6 +13,8 @@
> >
> >  #include <linux/bitmap.h>
> >  #include <linux/falloc.h>
> > +#include <setjmp.h>
> > +#include <signal.h>
> >  #include <sys/mman.h>
> >  #include <sys/types.h>
> >  #include <sys/stat.h>
> > @@ -34,12 +36,83 @@ static void test_file_read_write(int fd)
> >                     "pwrite on a guest_mem fd should fail");
> >  }
> >
> > -static void test_mmap(int fd, size_t page_size)
> > +static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
> > +{
> > +       const char val = 0xaa;
> > +       char *mem;
>
> This must be `volatile char *` to ensure that the compiler doesn't
> elide the accesses you have written.
>
> > +       size_t i;
> > +       int ret;
> > +
> > +       mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
> > +       TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
> > +
> > +       mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> > +       TEST_ASSERT(mem != MAP_FAILED, "mmap() for shared guest memory should succeed.");
> > +
> > +       memset(mem, val, total_size);
>
> Now unfortunately, `memset` and `munmap` will complain about the
> volatile qualification. So...
>
> memset((char *)mem, val, total_size);
>
> Eh... wish they just wouldn't complain, but this is a small price to
> pay for correctness. :)
>
> > +       for (i = 0; i < total_size; i++)
> > +               TEST_ASSERT_EQ(mem[i], val);
>
> The compiler is allowed to[1] elide the read of `mem[i]` and just
> assume that it is `val`.
>
> [1]: https://godbolt.org/z/Wora54bP6
>
> Feel free to add `volatile` to that snippet to see how the code changes.

Having tried both that and Sean's READ_ONCE() suggestion, I went with
the latter. As Sean said, the accesses aren't optimised out, and
READ_ONCE() avoids the need for the casts.
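
For reference, a minimal sketch of what the verification loops end up
looking like with READ_ONCE() (assuming the READ_ONCE() helper from
tools/include/linux/compiler.h, which the kvm selftests already use
elsewhere):

	/* Force each read through READ_ONCE() so the compiler can't
	 * assume mem[i] still holds val from the preceding memset().
	 */
	for (i = 0; i < total_size; i++)
		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);

The read goes through a volatile access inside READ_ONCE(), so the
accesses can't be elided, and mem can stay a plain char *, meaning
memset() and munmap() don't need casts.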

> > +
> > +       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
> > +                       page_size);
> > +       TEST_ASSERT(!ret, "fallocate the first page should succeed.");
> > +
> > +       for (i = 0; i < page_size; i++)
> > +               TEST_ASSERT_EQ(mem[i], 0x00);
> > +       for (; i < total_size; i++)
> > +               TEST_ASSERT_EQ(mem[i], val);
> > +
> > +       memset(mem, val, page_size);
> > +       for (i = 0; i < total_size; i++)
> > +               TEST_ASSERT_EQ(mem[i], val);
> > +
> > +       ret = munmap(mem, total_size);
> > +       TEST_ASSERT(!ret, "munmap() should succeed.");
> > +}
> > +
> > +static sigjmp_buf jmpbuf;
> > +void fault_sigbus_handler(int signum)
> > +{
> > +       siglongjmp(jmpbuf, 1);
> > +}
> > +
> > +static void test_fault_overflow(int fd, size_t page_size, size_t total_size)
> > +{
> > +       struct sigaction sa_old, sa_new = {
> > +               .sa_handler = fault_sigbus_handler,
> > +       };
> > +       size_t map_size = total_size * 4;
> > +       const char val = 0xaa;
> > +       char *mem;
>
> `volatile` here as well.
>
> > +       size_t i;
> > +       int ret;
> > +
> > +       mem = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> > +       TEST_ASSERT(mem != MAP_FAILED, "mmap() for shared guest memory should succeed.");
> > +
> > +       sigaction(SIGBUS, &sa_new, &sa_old);
> > +       if (sigsetjmp(jmpbuf, 1) == 0) {
> > +               memset(mem, 0xaa, map_size);
> > +               TEST_ASSERT(false, "memset() should have triggered SIGBUS.");
> > +       }
> > +       sigaction(SIGBUS, &sa_old, NULL);
> > +
> > +       for (i = 0; i < total_size; i++)
> > +               TEST_ASSERT_EQ(mem[i], val);
> > +
> > +       ret = munmap(mem, map_size);
> > +       TEST_ASSERT(!ret, "munmap() should succeed.");
> > +}
> > +
> > +static void test_mmap_not_supported(int fd, size_t page_size, size_t total_size)
> >  {
> >         char *mem;
> >
> >         mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> >         TEST_ASSERT_EQ(mem, MAP_FAILED);
> > +
> > +       mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> > +       TEST_ASSERT_EQ(mem, MAP_FAILED);
> >  }
> >
> >  static void test_file_size(int fd, size_t page_size, size_t total_size)
> > @@ -120,26 +193,19 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
> >         }
> >  }
> >
> > -static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
> > +static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
> > +                                                 uint64_t guest_memfd_flags,
> > +                                                 size_t page_size)
> >  {
> > -       size_t page_size = getpagesize();
> > -       uint64_t flag;
> >         size_t size;
> >         int fd;
> >
> >         for (size = 1; size < page_size; size++) {
> > -               fd = __vm_create_guest_memfd(vm, size, 0);
> > -               TEST_ASSERT(fd == -1 && errno == EINVAL,
> > +               fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags);
> > +               TEST_ASSERT(fd < 0 && errno == EINVAL,
> >                             "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
> >                             size);
> >         }
> > -
> > -       for (flag = BIT(0); flag; flag <<= 1) {
> > -               fd = __vm_create_guest_memfd(vm, page_size, flag);
> > -               TEST_ASSERT(fd == -1 && errno == EINVAL,
> > -                           "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> > -                           flag);
> > -       }
> >  }
> >
> >  static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> > @@ -171,30 +237,123 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> >         close(fd1);
> >  }
> >
> > -int main(int argc, char *argv[])
> > +static bool check_vm_type(unsigned long vm_type)
> >  {
> > -       size_t page_size;
> > +       /*
> > +        * Not all architectures support KVM_CAP_VM_TYPES. However, those that
> > +        * support guest_memfd have that support for the default VM type.
> > +        */
> > +       if (vm_type == VM_TYPE_DEFAULT)
> > +               return true;
> > +
> > +       return kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type);
> > +}
> > +
> > +static void test_with_type(unsigned long vm_type, uint64_t guest_memfd_flags,
> > +                          bool expect_mmap_allowed)
> > +{
> > +       struct kvm_vm *vm;
> >         size_t total_size;
> > +       size_t page_size;
> >         int fd;
> > -       struct kvm_vm *vm;
> >
> > -       TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> > +       if (!check_vm_type(vm_type))
> > +               return;
> >
> >         page_size = getpagesize();
> >         total_size = page_size * 4;
> >
> > -       vm = vm_create_barebones();
> > +       vm = vm_create_barebones_type(vm_type);
> >
> > -       test_create_guest_memfd_invalid(vm);
> >         test_create_guest_memfd_multiple(vm);
> > +       test_create_guest_memfd_invalid_sizes(vm, guest_memfd_flags, page_size);
> >
> > -       fd = vm_create_guest_memfd(vm, total_size, 0);
> > +       fd = vm_create_guest_memfd(vm, total_size, guest_memfd_flags);
> >
> >         test_file_read_write(fd);
> > -       test_mmap(fd, page_size);
> > +
> > +       if (expect_mmap_allowed) {
> > +               test_mmap_supported(fd, page_size, total_size);
> > +               test_fault_overflow(fd, page_size, total_size);
> > +
> > +       } else {
> > +               test_mmap_not_supported(fd, page_size, total_size);
> > +       }
> > +
> >         test_file_size(fd, page_size, total_size);
> >         test_fallocate(fd, page_size, total_size);
> >         test_invalid_punch_hole(fd, page_size, total_size);
> >
> >         close(fd);
> > +       kvm_vm_release(vm);
>
> I think kvm_vm_free() is probably more appropriate?

Ack (for both).
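
For clarity, the change in both test_with_type() and
test_vm_type_gmem_flag_validity() would be along the lines of:

-	kvm_vm_release(vm);
+	kvm_vm_free(vm);

since, if I'm reading lib/kvm_util.c right, kvm_vm_free() does the full
teardown (closing the fds and freeing the struct kvm_vm and its memory
regions), while kvm_vm_release() only closes the fds.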

Cheers,
/fuad

> > +}
> > +
> > +static void test_vm_type_gmem_flag_validity(unsigned long vm_type,
> > +                                           uint64_t expected_valid_flags)
> > +{
> > +       size_t page_size = getpagesize();
> > +       struct kvm_vm *vm;
> > +       uint64_t flag = 0;
> > +       int fd;
> > +
> > +       if (!check_vm_type(vm_type))
> > +               return;
> > +
> > +       vm = vm_create_barebones_type(vm_type);
> > +
> > +       for (flag = BIT(0); flag; flag <<= 1) {
> > +               fd = __vm_create_guest_memfd(vm, page_size, flag);
> > +
> > +               if (flag & expected_valid_flags) {
> > +                       TEST_ASSERT(fd >= 0,
> > +                                   "guest_memfd() with flag '0x%lx' should be valid",
> > +                                   flag);
> > +                       close(fd);
> > +               } else {
> > +                       TEST_ASSERT(fd < 0 && errno == EINVAL,
> > +                                   "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> > +                                   flag);
> > +               }
> > +       }
> > +
> > +       kvm_vm_release(vm);
>
> Same here.
>
> > +}
> > +
> > +static void test_gmem_flag_validity(void)
> > +{
> > +       uint64_t non_coco_vm_valid_flags = 0;
> > +
> > +       if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM))
> > +               non_coco_vm_valid_flags = GUEST_MEMFD_FLAG_SUPPORT_SHARED;
> > +
> > +       test_vm_type_gmem_flag_validity(VM_TYPE_DEFAULT, non_coco_vm_valid_flags);
> > +
> > +#ifdef __x86_64__
> > +       test_vm_type_gmem_flag_validity(KVM_X86_SW_PROTECTED_VM, non_coco_vm_valid_flags);
> > +       test_vm_type_gmem_flag_validity(KVM_X86_SEV_VM, 0);
> > +       test_vm_type_gmem_flag_validity(KVM_X86_SEV_ES_VM, 0);
> > +       test_vm_type_gmem_flag_validity(KVM_X86_SNP_VM, 0);
> > +       test_vm_type_gmem_flag_validity(KVM_X86_TDX_VM, 0);
> > +#endif
> > +}
> > +
> > +int main(int argc, char *argv[])
> > +{
> > +       TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> > +
> > +       test_gmem_flag_validity();
> > +
> > +       test_with_type(VM_TYPE_DEFAULT, 0, false);
> > +       if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> > +               test_with_type(VM_TYPE_DEFAULT, GUEST_MEMFD_FLAG_SUPPORT_SHARED,
> > +                              true);
> > +       }
> > +
> > +#ifdef __x86_64__
> > +       test_with_type(KVM_X86_SW_PROTECTED_VM, 0, false);
> > +       if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> > +               test_with_type(KVM_X86_SW_PROTECTED_VM,
> > +                              GUEST_MEMFD_FLAG_SUPPORT_SHARED, true);
> > +       }
> > +#endif
> >  }
> > --
> > 2.49.0.1266.g31b7d2e469-goog
> >