On Mon, Jul 7, 2025 at 9:04 AM Leon Hwang <leon.hwang@xxxxxxxxx> wrote:
>
> This patch adds test coverage for the new BPF_F_CPU flag support in
> percpu_array maps. The following APIs are exercised:
>
> * bpf_map_update_batch()
> * bpf_map_lookup_batch()
> * bpf_map_update_elem_opts()
> * bpf_map__update_elem_opts()
> * bpf_map_lookup_elem_opts()
> * bpf_map__lookup_elem_opts()
>
> cd tools/testing/selftests/bpf/
> ./test_progs -t percpu_alloc/cpu_flag_tests
> 253/13 percpu_alloc/cpu_flag_tests:OK
> 253     percpu_alloc:OK
> Summary: 1/13 PASSED, 0 SKIPPED, 0 FAILED
>
> Signed-off-by: Leon Hwang <leon.hwang@xxxxxxxxx>
> ---
>  .../selftests/bpf/prog_tests/percpu_alloc.c   | 170 ++++++++++++++++++
>  .../selftests/bpf/progs/percpu_array_flag.c   |  24 +++
>  2 files changed, 194 insertions(+)
>  create mode 100644 tools/testing/selftests/bpf/progs/percpu_array_flag.c
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
> index 343da65864d6..6f0d0e6dc76a 100644
> --- a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
> +++ b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
> @@ -3,6 +3,7 @@
>  #include "percpu_alloc_array.skel.h"
>  #include "percpu_alloc_cgrp_local_storage.skel.h"
>  #include "percpu_alloc_fail.skel.h"
> +#include "percpu_array_flag.skel.h"
>
>  static void test_array(void)
>  {
> @@ -115,6 +116,173 @@ static void test_failure(void) {
>  	RUN_TESTS(percpu_alloc_fail);
>  }
>
> +static void test_cpu_flag(void)
> +{
> +	int map_fd, *keys = NULL, value_size, cpu, i, j, nr_cpus, err;
> +	size_t key_sz = sizeof(int), value_sz = sizeof(u64);
> +	struct percpu_array_flag *skel;
> +	u64 batch = 0, *values = NULL;
> +	const u64 value = 0xDEADC0DE;
> +	u32 count, max_entries;
> +	struct bpf_map *map;
> +	LIBBPF_OPTS(bpf_map_lookup_elem_opts, lookup_opts,
> +		.flags = BPF_F_CPU,
> +		.cpu = 0,
> +	);
> +	LIBBPF_OPTS(bpf_map_update_elem_opts, update_opts,
> +		.flags = BPF_F_CPU,
> +		.cpu = 0,
> +	);
> +	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts,
> +		.elem_flags = BPF_F_CPU,
> +		.flags = 0,
> +	);
> +
> +	nr_cpus = libbpf_num_possible_cpus();
> +	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
> +		return;
> +
> +	skel = percpu_array_flag__open_and_load();
> +	if (!ASSERT_OK_PTR(skel, "percpu_array_flag__open_and_load"))
> +		return;
> +
> +	map = skel->maps.percpu;
> +	map_fd = bpf_map__fd(map);
> +	max_entries = bpf_map__max_entries(map);
> +
> +	value_size = value_sz * nr_cpus;
> +	values = calloc(max_entries, value_size);
> +	keys = calloc(max_entries, key_sz);
> +	if (!ASSERT_FALSE(!keys || !values, "calloc keys and values"))

ASSERT_xxx are meant to be meaningful in the case that some condition
fails, so using generic ASSERT_FALSE with some complicated condition
is defeating that purpose. Use two separate ASSERT_OK_PTR checks
instead.
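Something along these lines would be clearer (a rough sketch only,
reusing the names from the quoted hunk; it assumes the function has an
`out` cleanup label further down that frees keys/values and destroys
the skeleton):

	/* check each allocation separately so a failure names the culprit */
	values = calloc(max_entries, value_size);
	if (!ASSERT_OK_PTR(values, "calloc values"))
		goto out;

	keys = calloc(max_entries, key_sz);
	if (!ASSERT_OK_PTR(keys, "calloc keys"))
		goto out;

That way the failure message immediately points at whichever
allocation actually failed.

[...]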