Re: [PATCH bpf-next v3 3/4] bpf, bpftool: Generate skeleton for global percpu data

Adding the libbpf-rs maintainer, Daniel, for awareness, as the Rust
skeleton will have to add support for this once this patch set lands
upstream.


On Mon, May 26, 2025 at 9:22 AM Leon Hwang <leon.hwang@xxxxxxxxx> wrote:
>
> This patch enhances bpftool to generate skeletons that properly handle
> global percpu variables. The generated skeleton now includes a dedicated
> structure for percpu data, allowing users to conveniently initialize and
> access percpu variables.
>
> For global percpu variables, the skeleton now includes a nested
> structure, e.g.:
>
> struct test_global_percpu_data {
>         struct bpf_object_skeleton *skeleton;
>         struct bpf_object *obj;
>         struct {
>                 struct bpf_map *data__percpu;
>         } maps;
>         // ...
>         struct test_global_percpu_data__data__percpu {
>                 int data;
>                 char run;
>                 struct {
>                         char set;
>                         int i;
>                         int nums[7];
>                 } struct_data;
>                 int nums[7];
>         } __aligned(8) *data__percpu;
>
>         // ...
> };
>
>   * The "struct test_global_percpu_data__data__percpu *data__percpu"
>     pointer refers to the initialization data, which is actually
>     "maps.data__percpu->mmaped".
>   * Before the skeleton is loaded, writing through
>     "struct test_global_percpu_data__data__percpu *data__percpu" sets
>     the initial values of the corresponding global percpu variables.
>   * After the skeleton is loaded, accessing or updating this struct is
>     no longer allowed, because the pointer has been reset to NULL.
>     Instead, users must interact with the global percpu variables
>     through the "maps.data__percpu" map, as in the sketch below.
>
> Signed-off-by: Leon Hwang <leon.hwang@xxxxxxxxx>
> ---
>  tools/bpf/bpftool/gen.c | 47 +++++++++++++++++++++++++++++------------
>  1 file changed, 34 insertions(+), 13 deletions(-)
>
> diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
> index 67a60114368f5..c672f52110221 100644
> --- a/tools/bpf/bpftool/gen.c
> +++ b/tools/bpf/bpftool/gen.c
> @@ -92,7 +92,7 @@ static void get_header_guard(char *guard, const char *obj_name, const char *suff
>
>  static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
>  {
> -       static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
> +       static const char *sfxs[] = { ".data..percpu", ".data", ".rodata", ".bss", ".kconfig" };
>         const char *name = bpf_map__name(map);
>         int i, n;
>
> @@ -117,7 +117,7 @@ static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
>
>  static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
>  {
> -       static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
> +       static const char *pfxs[] = { ".data..percpu", ".data", ".rodata", ".bss", ".kconfig" };
>         int i, n;
>
>         /* recognize hard coded LLVM section name */
> @@ -148,7 +148,8 @@ static int codegen_datasec_def(struct bpf_object *obj,
>                                struct btf *btf,
>                                struct btf_dump *d,
>                                const struct btf_type *sec,
> -                              const char *obj_name)
> +                              const char *obj_name,
> +                              bool is_percpu)
>  {
>         const char *sec_name = btf__name_by_offset(btf, sec->name_off);
>         const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
> @@ -228,7 +229,7 @@ static int codegen_datasec_def(struct bpf_object *obj,
>
>                 off = sec_var->offset + sec_var->size;
>         }
> -       printf("        } *%s;\n", sec_ident);
> +       printf("        }%s *%s;\n", is_percpu ? " __aligned(8)" : "", sec_ident);
>         return 0;
>  }
>
> @@ -263,13 +264,13 @@ static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
>                 return true;
>         }
>
> -       if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
> -               return false;
> -
> -       if (!get_map_ident(map, buf, sz))
> -               return false;
> +       if (bpf_map__is_internal(map) &&
> +           ((bpf_map__map_flags(map) & BPF_F_MMAPABLE) ||
> +            bpf_map__is_internal_percpu(map)) &&
> +           get_map_ident(map, buf, sz))
> +               return true;
>
> -       return true;
> +       return false;
>  }
>
>  static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
> @@ -303,7 +304,8 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
>                         printf("        struct %s__%s {\n", obj_name, map_ident);
>                         printf("        } *%s;\n", map_ident);
>                 } else {
> -                       err = codegen_datasec_def(obj, btf, d, sec, obj_name);
> +                       err = codegen_datasec_def(obj, btf, d, sec, obj_name,
> +                                                 bpf_map__is_internal_percpu(map));
>                         if (err)
>                                 goto out;
>                 }
> @@ -795,7 +797,8 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
>         bpf_object__for_each_map(map, obj) {
>                 const char *mmap_flags;
>
> -               if (!is_mmapable_map(map, ident, sizeof(ident)))
> +               if (!is_mmapable_map(map, ident, sizeof(ident)) ||
> +                   bpf_map__is_internal_percpu(map))
>                         continue;
>
>                 if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
> @@ -1434,7 +1437,25 @@ static int do_skeleton(int argc, char **argv)
>                 static inline int                                           \n\
>                 %1$s__load(struct %1$s *obj)                                \n\
>                 {                                                           \n\
> -                       return bpf_object__load_skeleton(obj->skeleton);    \n\
> +                       int err;                                            \n\
> +                                                                           \n\
> +                       err = bpf_object__load_skeleton(obj->skeleton);     \n\
> +                       if (err)                                            \n\
> +                               return err;                                 \n\
> +                                                                           \n\
> +               ", obj_name);
> +
> +       if (map_cnt) {
> +               bpf_object__for_each_map(map, obj) {
> +                       if (bpf_map__is_internal_percpu(map) &&
> +                           get_map_ident(map, ident, sizeof(ident)))
> +                               printf("\tobj->%s = NULL;\n", ident);
> +               }
> +       }

hm... maybe we can avoid this by making libbpf re-mmap() this
initialization image as read-only during bpf_object load? Then the
pointer can stay in the skeleton and be available for querying the
"initialization values" (if anyone cares), and we won't need any extra
post-processing steps in the generated skeleton code.
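
Something like the following inside libbpf's load path (just a sketch
of the idea; "mmap_sz" stands in for whatever image size libbpf tracks
internally, and mprotect() is the moral equivalent of a re-mmap()):

	/* After the initial values have been copied into the kernel
	 * percpu map, keep the userspace image around but make it
	 * read-only, instead of NULL-ing the skeleton pointer.
	 */
	if (mprotect(map->mmaped, mmap_sz, PROT_READ))
		return -errno;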

And the Rust skeleton would then be able to expose this as an immutable
reference with no extra magic behind it?


> +
> +       codegen("\
> +               \n\
> +                       return 0;                                           \n\
>                 }                                                           \n\
>                                                                             \n\
>                 static inline struct %1$s *                                 \n\
> --
> 2.49.0
>