Hi Gavin,

On Wed, 21 May 2025 at 07:53, Gavin Shan <gshan@xxxxxxxxxx> wrote:
>
> Hi Fuad,
>
> On 5/14/25 2:34 AM, Fuad Tabba wrote:
> > Expand the guest_memfd selftests to include testing mapping guest
> > memory for VM types that support it.
> >
> > Also, build the guest_memfd selftest for arm64.
> >
> > Co-developed-by: Ackerley Tng <ackerleytng@xxxxxxxxxx>
> > Signed-off-by: Ackerley Tng <ackerleytng@xxxxxxxxxx>
> > Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
> > ---
> >  tools/testing/selftests/kvm/Makefile.kvm      |   1 +
> >  .../testing/selftests/kvm/guest_memfd_test.c  | 145 +++++++++++++++---
> >  2 files changed, 126 insertions(+), 20 deletions(-)
> >
> > diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
> > index f62b0a5aba35..ccf95ed037c3 100644
> > --- a/tools/testing/selftests/kvm/Makefile.kvm
> > +++ b/tools/testing/selftests/kvm/Makefile.kvm
> > @@ -163,6 +163,7 @@ TEST_GEN_PROGS_arm64 += access_tracking_perf_test
> >  TEST_GEN_PROGS_arm64 += arch_timer
> >  TEST_GEN_PROGS_arm64 += coalesced_io_test
> >  TEST_GEN_PROGS_arm64 += dirty_log_perf_test
> > +TEST_GEN_PROGS_arm64 += guest_memfd_test
> >  TEST_GEN_PROGS_arm64 += get-reg-list
> >  TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
> >  TEST_GEN_PROGS_arm64 += memslot_perf_test
> > diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
> > index ce687f8d248f..443c49185543 100644
> > --- a/tools/testing/selftests/kvm/guest_memfd_test.c
> > +++ b/tools/testing/selftests/kvm/guest_memfd_test.c
> > @@ -34,12 +34,46 @@ static void test_file_read_write(int fd)
> >                      "pwrite on a guest_mem fd should fail");
> >  }
> >
> > -static void test_mmap(int fd, size_t page_size)
> > +static void test_mmap_allowed(int fd, size_t page_size, size_t total_size)
> > +{
> > +        const char val = 0xaa;
> > +        char *mem;
> > +        size_t i;
> > +        int ret;
> > +
> > +        mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> > +        TEST_ASSERT(mem != MAP_FAILED, "mmaping() guest memory should pass.");
> > +
> > +        memset(mem, val, total_size);
> > +        for (i = 0; i < total_size; i++)
> > +                TEST_ASSERT_EQ(mem[i], val);
> > +
> > +        ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
> > +                        page_size);
> > +        TEST_ASSERT(!ret, "fallocate the first page should succeed");
> > +
> > +        for (i = 0; i < page_size; i++)
> > +                TEST_ASSERT_EQ(mem[i], 0x00);
> > +        for (; i < total_size; i++)
> > +                TEST_ASSERT_EQ(mem[i], val);
> > +
> > +        memset(mem, val, total_size);
> > +        for (i = 0; i < total_size; i++)
> > +                TEST_ASSERT_EQ(mem[i], val);
> > +
>
> The last memset() and the check of the resident values look redundant, because
> the same test has already been covered by the first memset(). If we really want
> to double-check that the page cache is writable, it would be enough to cover
> the first page. Otherwise, I guess this hunk of code can be removed :)

My goal was to check that it is in fact writable, and that it stores
the expected value, after the punch_hole. I'll limit it to the first
page.
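In other words, pretty much your snippet below, but kept after the PUNCH_HOLE
checks so it also verifies that the hole-punched page can be written again.
An untested sketch, reusing 'mem', 'val' and 'i' from test_mmap_allowed():

        /* Re-check writability of the first page only, after the PUNCH_HOLE. */
        memset(mem, val, page_size);
        for (i = 0; i < page_size; i++)
                TEST_ASSERT_EQ(mem[i], val);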
>
>         memset(mem, val, page_size);
>         for (i = 0; i < page_size; i++)
>                 TEST_ASSERT_EQ(mem[i], val);
>
> > +        ret = munmap(mem, total_size);
> > +        TEST_ASSERT(!ret, "munmap should succeed");
> > +}
> > +
> > +static void test_mmap_denied(int fd, size_t page_size, size_t total_size)
> >  {
> >          char *mem;
> >
> >          mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> >          TEST_ASSERT_EQ(mem, MAP_FAILED);
> > +
> > +        mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> > +        TEST_ASSERT_EQ(mem, MAP_FAILED);
> >  }
> >
> >  static void test_file_size(int fd, size_t page_size, size_t total_size)
> > @@ -120,26 +154,19 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
> >          }
> >  }
> >
> > -static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
> > +static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
> > +                                                  uint64_t guest_memfd_flags,
> > +                                                  size_t page_size)
> >  {
> > -        size_t page_size = getpagesize();
> > -        uint64_t flag;
> >          size_t size;
> >          int fd;
> >
> >          for (size = 1; size < page_size; size++) {
> > -                fd = __vm_create_guest_memfd(vm, size, 0);
> > +                fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags);
> >                  TEST_ASSERT(fd == -1 && errno == EINVAL,
> >                              "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
> >                              size);
> >          }
> > -
> > -        for (flag = BIT(0); flag; flag <<= 1) {
> > -                fd = __vm_create_guest_memfd(vm, page_size, flag);
> > -                TEST_ASSERT(fd == -1 && errno == EINVAL,
> > -                            "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> > -                            flag);
> > -        }
> >  }
> >
> >  static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> > @@ -170,30 +197,108 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> >          close(fd1);
> >  }
> >
> > -int main(int argc, char *argv[])
> > +static void test_with_type(unsigned long vm_type, uint64_t guest_memfd_flags,
> > +                           bool expect_mmap_allowed)
> >  {
> > -        size_t page_size;
> > +        struct kvm_vm *vm;
> >          size_t total_size;
> > +        size_t page_size;
> >          int fd;
> > -        struct kvm_vm *vm;
> >
> > -        TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> > +        if (!(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type)))
> > +                return;
> >
>
> The check seems incorrect for aarch64, since kvm_check_cap() always returns 0
> there, so the test ends up being skipped for VM_TYPE_DEFAULT on aarch64. It
> would be something like below:
>
> #define VM_TYPE_DEFAULT 0
>
>         if (vm_type != VM_TYPE_DEFAULT &&
>             !(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type)))
>                 return;

Ack. Thanks for this, and for all the other reviews.
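To be concrete, I'll fold your check into test_with_type() (and likewise into
test_vm_type_gmem_flag_validity()) along these lines; untested, and the
selftest-local VM_TYPE_DEFAULT define is an assumption on my side:

#define VM_TYPE_DEFAULT 0

        /* Only skip when a non-default VM type isn't reported as supported. */
        if (vm_type != VM_TYPE_DEFAULT &&
            !(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type)))
                return;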
Cheers,
/fuad

> >          page_size = getpagesize();
> >          total_size = page_size * 4;
> >
> > -        vm = vm_create_barebones();
> > +        vm = vm_create_barebones_type(vm_type);
> >
> > -        test_create_guest_memfd_invalid(vm);
> >          test_create_guest_memfd_multiple(vm);
> > +        test_create_guest_memfd_invalid_sizes(vm, guest_memfd_flags, page_size);
> >
> > -        fd = vm_create_guest_memfd(vm, total_size, 0);
> > +        fd = vm_create_guest_memfd(vm, total_size, guest_memfd_flags);
> >
> >          test_file_read_write(fd);
> > -        test_mmap(fd, page_size);
> > +
> > +        if (expect_mmap_allowed)
> > +                test_mmap_allowed(fd, page_size, total_size);
> > +        else
> > +                test_mmap_denied(fd, page_size, total_size);
> > +
> >          test_file_size(fd, page_size, total_size);
> >          test_fallocate(fd, page_size, total_size);
> >          test_invalid_punch_hole(fd, page_size, total_size);
> >
> >          close(fd);
> > +        kvm_vm_release(vm);
> > +}
> > +
> > +static void test_vm_type_gmem_flag_validity(unsigned long vm_type,
> > +                                            uint64_t expected_valid_flags)
> > +{
> > +        size_t page_size = getpagesize();
> > +        struct kvm_vm *vm;
> > +        uint64_t flag = 0;
> > +        int fd;
> > +
> > +        if (!(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type)))
> > +                return;
>
> Same as above.

Ack.

> > +
> > +        vm = vm_create_barebones_type(vm_type);
> > +
> > +        for (flag = BIT(0); flag; flag <<= 1) {
> > +                fd = __vm_create_guest_memfd(vm, page_size, flag);
> > +
> > +                if (flag & expected_valid_flags) {
> > +                        TEST_ASSERT(fd > 0,
> > +                                    "guest_memfd() with flag '0x%lx' should be valid",
> > +                                    flag);
> > +                        close(fd);
> > +                } else {
> > +                        TEST_ASSERT(fd == -1 && errno == EINVAL,
> > +                                    "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> > +                                    flag);
>
> It's more robust to have:
>
>         TEST_ASSERT(fd < 0 && errno == EINVAL, ...);

Ack.

> > +                }
> > +        }
> > +
> > +        kvm_vm_release(vm);
> > +}
> > +
> > +static void test_gmem_flag_validity(void)
> > +{
> > +        uint64_t non_coco_vm_valid_flags = 0;
> > +
> > +        if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM))
> > +                non_coco_vm_valid_flags = GUEST_MEMFD_FLAG_SUPPORT_SHARED;
> > +
> > +        test_vm_type_gmem_flag_validity(VM_TYPE_DEFAULT, non_coco_vm_valid_flags);
> > +
> > +#ifdef __x86_64__
> > +        test_vm_type_gmem_flag_validity(KVM_X86_SW_PROTECTED_VM, non_coco_vm_valid_flags);
> > +        test_vm_type_gmem_flag_validity(KVM_X86_SEV_VM, 0);
> > +        test_vm_type_gmem_flag_validity(KVM_X86_SEV_ES_VM, 0);
> > +        test_vm_type_gmem_flag_validity(KVM_X86_SNP_VM, 0);
> > +        test_vm_type_gmem_flag_validity(KVM_X86_TDX_VM, 0);
> > +#endif
> > +}
> > +
> > +int main(int argc, char *argv[])
> > +{
> > +        TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> > +
> > +        test_gmem_flag_validity();
> > +
> > +        test_with_type(VM_TYPE_DEFAULT, 0, false);
> > +        if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> > +                test_with_type(VM_TYPE_DEFAULT, GUEST_MEMFD_FLAG_SUPPORT_SHARED,
> > +                               true);
> > +        }
> > +
> > +#ifdef __x86_64__
> > +        test_with_type(KVM_X86_SW_PROTECTED_VM, 0, false);
> > +        if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> > +                test_with_type(KVM_X86_SW_PROTECTED_VM,
> > +                               GUEST_MEMFD_FLAG_SUPPORT_SHARED, true);
> > +        }
> > +#endif
> >  }
>
> Thanks,
> Gavin
>