On 9/8/25 08:43, Eduard Zingerman wrote:
On Fri, 2025-09-05 at 17:45 +0100, Mykyta Yatsenko wrote:
From: Mykyta Yatsenko <yatsenko@xxxxxxxx>
Introduce selftests that check the BPF task work scheduling mechanism.
Validate that the verifier does not accept incorrect calls to the
bpf_task_work_schedule kfunc.
Signed-off-by: Mykyta Yatsenko <yatsenko@xxxxxxxx>
---
The test cases in this patch check functional correctness, but there
is no attempt at stress testing the state machine.
E.g. how hard/feasible would it be to construct a test that exercises
both branches of the (state == BPF_TW_SCHEDULED) check in
bpf_task_work_cancel_and_free()?
Good point. I have a stress test, but did not include it in the patches,
as it takes longer to run.
I had to add logs to the kernel code to make sure the cancellation/freeing
branches are hit.
https://github.com/kernel-patches/bpf/commit/86408b074ab0a2d290977846c3e99a07443ac604
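
For reference, its rough shape is something like the sketch below (not
the linked commit verbatim; the program name "oncpu_hash_map" and map
name "hmap" are made up here): one thread keeps a sampling perf event
firing so the BPF program keeps calling bpf_task_work_schedule(), while
another thread keeps deleting map elements, so
bpf_task_work_cancel_and_free() races entries in both the scheduled and
the not-yet-scheduled states.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>
	#include <bpf/libbpf.h>
	#include <bpf/bpf.h>
	#include "task_work.skel.h"

	static atomic_bool stop;

	/* Keep deleting map elements so that element free/cancellation
	 * races against task work that is about to be, or already is,
	 * scheduled. */
	static void *deleter(void *arg)
	{
		int map_fd = *(int *)arg, key;

		while (!atomic_load(&stop))
			for (key = 0; key < 100; key++)
				bpf_map_delete_elem(map_fd, &key);
		return NULL;
	}

	int main(void)
	{
		struct perf_event_attr attr = {
			.type = PERF_TYPE_HARDWARE,
			.config = PERF_COUNT_HW_CPU_CYCLES,
			.size = sizeof(attr),
			.sample_period = 1000,	/* fire often */
		};
		struct task_work *skel;
		struct bpf_link *link = NULL;
		pthread_t thr;
		volatile unsigned long i;
		int pe_fd, map_fd;

		skel = task_work__open_and_load();
		if (!skel)
			return 1;
		pe_fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (pe_fd < 0)
			goto out;
		/* "oncpu_hash_map" / "hmap" are hypothetical names */
		link = bpf_program__attach_perf_event(skel->progs.oncpu_hash_map, pe_fd);
		if (!link)
			goto out;
		map_fd = bpf_map__fd(skel->maps.hmap);
		pthread_create(&thr, NULL, deleter, &map_fd);
		/* busy loop so the sampling event keeps firing on this task */
		for (i = 0; i < (1UL << 28); i++)
			;
		atomic_store(&stop, true);
		pthread_join(thr, NULL);
	out:
		bpf_link__destroy(link);
		task_work__destroy(skel);
		return 0;
	}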
.../selftests/bpf/prog_tests/test_task_work.c | 149 ++++++++++++++++++
tools/testing/selftests/bpf/progs/task_work.c | 108 +++++++++++++
.../selftests/bpf/progs/task_work_fail.c | 98 ++++++++++++
3 files changed, 355 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/test_task_work.c
create mode 100644 tools/testing/selftests/bpf/progs/task_work.c
create mode 100644 tools/testing/selftests/bpf/progs/task_work_fail.c
diff --git a/tools/testing/selftests/bpf/prog_tests/test_task_work.c b/tools/testing/selftests/bpf/prog_tests/test_task_work.c
new file mode 100644
index 000000000000..9c3c7a46a827
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_task_work.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <string.h>
+#include <stdio.h>
+#include "task_work.skel.h"
+#include "task_work_fail.skel.h"
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <sys/syscall.h>
+#include <time.h>
+
+static int perf_event_open(__u32 type, __u64 config, int pid)
+{
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .size = sizeof(struct perf_event_attr),
+ .sample_period = 100000,
+ };
+
+ return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
+}
+
+struct elem {
+ char data[128];
+ struct bpf_task_work tw;
+};
+
+static int verify_map(struct bpf_map *map, const char *expected_data)
+{
+ int err;
+ struct elem value;
+ int processed_values = 0;
+ int k, sz;
+
+ sz = bpf_map__max_entries(map);
+ for (k = 0; k < sz; ++k) {
+ err = bpf_map__lookup_elem(map, &k, sizeof(int), &value, sizeof(struct elem), 0);
+ if (err)
+ continue;
+ if (!ASSERT_EQ(strcmp(expected_data, value.data), 0, "map data")) {
+ fprintf(stderr, "expected '%s', found '%s' in %s map\n", expected_data,
+ value.data, bpf_map__name(map));
+ return 2;
+ }
+ processed_values++;
+ }
+
+ return processed_values == 0;
Nit: check for exact number of expected values here?
+}
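
To make the nit above concrete, one possible shape (untested sketch;
the extra "expected_cnt" parameter is hypothetical):

	static int verify_map(struct bpf_map *map, const char *expected_data,
			      int expected_cnt)
	{
		struct elem value;
		int processed_values = 0;
		int err, k, sz;

		sz = bpf_map__max_entries(map);
		for (k = 0; k < sz; ++k) {
			err = bpf_map__lookup_elem(map, &k, sizeof(int), &value,
						   sizeof(struct elem), 0);
			if (err)
				continue;
			if (!ASSERT_EQ(strcmp(expected_data, value.data), 0, "map data"))
				return 2;
			processed_values++;
		}
		/* fail unless exactly the expected number of entries matched */
		return !ASSERT_EQ(processed_values, expected_cnt, "processed_values");
	}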
+
+static void task_work_run(const char *prog_name, const char *map_name)
+{
+ struct task_work *skel;
+ struct bpf_program *prog;
+ struct bpf_map *map;
+ struct bpf_link *link;
+ int err, pe_fd = 0, pid, status, pipefd[2];
+ char user_string[] = "hello world";
+
+ if (!ASSERT_NEQ(pipe(pipefd), -1, "pipe"))
+ return;
+
+ pid = fork();
Nit: check for negative return value?
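Something like this, perhaps (sketch, reusing the existing test_progs
helpers):

	if (!ASSERT_GE(pid, 0, "fork")) {
		close(pipefd[0]);
		close(pipefd[1]);
		return;
	}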
+ if (pid == 0) {
+ __u64 num = 1;
+ int i;
+ char buf;
+
+ close(pipefd[1]);
+ read(pipefd[0], &buf, sizeof(buf));
+ close(pipefd[0]);
+
+ for (i = 0; i < 10000; ++i)
+ num *= time(0) % 7;
+ (void)num;
+ exit(0);
+ }
+ skel = task_work__open();
+ if (!ASSERT_OK_PTR(skel, "task_work__open"))
+ return;
+
+ bpf_object__for_each_program(prog, skel->obj) {
+ bpf_program__set_autoload(prog, false);
+ }
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "prog_name"))
+ goto cleanup;
+ bpf_program__set_autoload(prog, true);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_PERF_EVENT);
Nit: this is not really necessary, the programs are already defined as
SEC("perf_event").
+ skel->bss->user_ptr = (char *)user_string;
+
+ err = task_work__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
[...]