Introduce the struct bpf_gtramp_link, which is used to attach a bpf
prog to multiple functions. Meanwhile, introduce the corresponding
functions bpf_gtrampoline_{link,unlink}_prog(). The global_tr_lock
mutex is held during global trampoline link and unlink.

We create a different global trampoline for each supported function
argument count. If the corresponding global_tr->image is NULL, that
argument count is not supported.

Signed-off-by: Menglong Dong <dongml2@xxxxxxxxxxxxxxx>
---
 include/linux/bpf.h     |  35 ++++++++
 kernel/bpf/trampoline.c | 189 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 224 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5dd556e89cce..70bf613d51d0 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -58,6 +58,8 @@ struct bpf_token;
 struct user_namespace;
 struct super_block;
 struct inode;
+struct bpf_tramp_link;
+struct bpf_gtramp_link;
 
 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -1279,6 +1281,12 @@ struct bpf_trampoline {
 	struct bpf_tramp_image *cur_image;
 };
 
+struct bpf_global_trampoline {
+	struct ftrace_ops *fops;
+	void *image;
+	int nr_args;
+};
+
 struct bpf_attach_target_info {
 	struct btf_func_model fmodel;
 	long tgt_addr;
@@ -1382,6 +1390,9 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
 void bpf_trampoline_put(struct bpf_trampoline *tr);
 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
 
+int bpf_gtrampoline_link_prog(struct bpf_gtramp_link *link);
+int bpf_gtrampoline_unlink_prog(struct bpf_gtramp_link *link);
+
 /*
  * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
  * indirection with a direct call to the bpf program. If the architecture does
@@ -1490,6 +1501,14 @@ static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
 {
 	return false;
 }
+static inline int bpf_gtrampoline_link_prog(struct bpf_gtramp_link *link)
+{
+	return -ENODEV;
+}
+static inline int bpf_gtrampoline_unlink_prog(struct bpf_gtramp_link *link)
+{
+	return -ENODEV;
+}
 #endif
 
 struct bpf_func_info_aux {
@@ -1746,6 +1765,22 @@ struct bpf_shim_tramp_link {
 	struct bpf_trampoline *trampoline;
 };
 
+struct bpf_gtramp_link_entry {
+	struct bpf_prog *tgt_prog;
+	struct bpf_trampoline *trampoline;
+	void *addr;
+	struct btf *attach_btf;
+	u64 cookie;
+	u32 btf_id;
+	u32 nr_args;
+};
+
+struct bpf_gtramp_link {
+	struct bpf_link link;
+	struct bpf_gtramp_link_entry *entries;
+	u32 entry_cnt;
+};
+
 struct bpf_tracing_link {
 	struct bpf_tramp_link link;
 	enum bpf_attach_type attach_type;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index fa90c225c93b..f70921ce4e97 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -14,6 +14,7 @@
 #include <linux/bpf_lsm.h>
 #include <linux/delay.h>
 #include <linux/bpf_tramp.h>
+#include <linux/kfunc_md.h>
 
 /* dummy _ops. The verifier will operate on target program's ops. */
 const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -30,6 +31,10 @@ static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
 /* serializes access to trampoline_table */
 static DEFINE_MUTEX(trampoline_mutex);
 
+static struct bpf_global_trampoline global_tr_array[MAX_BPF_FUNC_ARGS + 1];
+static DEFINE_MUTEX(global_tr_lock);
+static const struct bpf_link_ops bpf_shim_tramp_link_lops;
+
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 static int bpf_trampoline_update(struct bpf_trampoline *tr,
 				 bool lock_direct_mutex);
@@ -646,6 +651,172 @@ int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
 	return err;
 }
 
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) && defined(CONFIG_ARCH_HAS_BPF_GLOBAL_CALLER)
+static int bpf_gtrampoline_update(struct bpf_global_trampoline *tr)
+{
+	struct ftrace_ops *fops;
+	int ips_count, err = 0;
+	void **ips = NULL;
+
+	ips_count = kfunc_md_bpf_ips(&ips, tr->nr_args);
+	if (ips_count < 0) {
+		err = ips_count;
+		goto out;
+	}
+
+	fops = tr->fops;
+	if (ips_count == 0) {
+		if (!(fops->flags & FTRACE_OPS_FL_ENABLED))
+			goto out;
+		err = unregister_ftrace_direct(fops, (unsigned long)tr->image,
+					       true);
+		goto out;
+	}
+
+	if (fops->flags & FTRACE_OPS_FL_ENABLED) {
+		err = reset_ftrace_direct_ips(fops, (unsigned long *)ips,
+					      ips_count);
+		goto out;
+	}
+
+	err = ftrace_set_filter_ips(tr->fops, (unsigned long *)ips,
+				    ips_count, 0, 1);
+	if (err)
+		goto out;
+
+	err = register_ftrace_direct(fops, (unsigned long)tr->image);
+out:
+	kfree(ips);
+
+	return err;
+}
+
+static int bpf_gtrampoline_update_all(void)
+{
+	struct bpf_global_trampoline *gtr;
+	int err;
+
+	for (int i = 0; i <= MAX_BPF_FUNC_ARGS; i++) {
+		gtr = &global_tr_array[i];
+		if (!gtr->image)
+			break;
+		err = bpf_gtrampoline_update(gtr);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+#else
+static int bpf_gtrampoline_update_all(void)
+{
+	return -ENODEV;
+}
+#endif
+
+static int __bpf_gtrampoline_unlink_prog(struct bpf_gtramp_link *link,
+					 u32 cnt)
+{
+	enum bpf_tramp_prog_type kind;
+	struct kfunc_md *md;
+	int err = 0;
+
+	kind = bpf_attach_type_to_tramp(link->link.prog);
+
+	/* remove the prog from all the corresponding md */
+	for (int i = 0; i < link->entry_cnt; i++) {
+		md = kfunc_md_get((long)link->entries[i].addr);
+		if (WARN_ON_ONCE(!md))
+			continue;
+
+		err = kfunc_md_bpf_unlink(md, link->link.prog, kind);
+		if (err)
+			return err;
+	}
+
+	bpf_gtrampoline_update_all();
+	for (int i = 0; i < cnt; i++)
+		kfunc_md_put_ip((long)link->entries[i].addr);
+
+	return 0;
+}
+
+int bpf_gtrampoline_unlink_prog(struct bpf_gtramp_link *link)
+{
+	int err;
+
+	/* hold the global trampoline lock to keep the target functions
+	 * consistent while we unlink the prog.
+	 */
+	mutex_lock(&global_tr_lock);
+	err = __bpf_gtrampoline_unlink_prog(link, link->entry_cnt);
+	mutex_unlock(&global_tr_lock);
+
+	return err;
+}
+
+int bpf_gtrampoline_link_prog(struct bpf_gtramp_link *link)
+{
+	struct bpf_gtramp_link_entry *entry;
+	enum bpf_tramp_prog_type kind;
+	struct bpf_prog *prog;
+	struct kfunc_md *md;
+	bool update = false;
+	int err = 0, i;
+
+	/* check if the function argument count is supported by the arch */
+	for (int i = 0; i < link->entry_cnt; i++) {
+		entry = &link->entries[i];
+		if (entry->nr_args > MAX_BPF_FUNC_ARGS ||
+		    !global_tr_array[entry->nr_args].image)
+			return -EOPNOTSUPP;
+	}
+
+	prog = link->link.prog;
+	kind = bpf_attach_type_to_tramp(prog);
+
+	/* hold the global trampoline lock to keep the target functions
+	 * consistent while we link the prog.
+	 */
+	mutex_lock(&global_tr_lock);
+
+	/* update the bpf prog to all the corresponding function metadata */
+	for (i = 0; i < link->entry_cnt; i++) {
+		entry = &link->entries[i];
+		md = kfunc_md_create((long)entry->addr, entry->nr_args);
+		if (md) {
+			/* the function is not in the filter hash of gtr,
+			 * we need to update the global trampoline.
+			 */
+			if (!md->bpf_prog_cnt)
+				update = true;
+			err = kfunc_md_bpf_link(md, prog, kind, entry->cookie);
+		} else {
+			err = -ENOMEM;
+		}
+
+		if (err) {
+			kfunc_md_put(md);
+			goto on_fallback;
+		}
+	}
+
+	if (update) {
+		err = bpf_gtrampoline_update_all();
+		if (err)
+			goto on_fallback;
+	}
+	mutex_unlock(&global_tr_lock);
+
+	return 0;
+
+on_fallback:
+	__bpf_gtrampoline_unlink_prog(link, i);
+	mutex_unlock(&global_tr_lock);
+
+	return err;
+}
+
 #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
 static void bpf_shim_tramp_link_release(struct bpf_link *link)
 {
@@ -1117,6 +1288,24 @@ static int __init init_trampolines(void)
 {
 	int i;
 
+	for (i = 0; i <= MAX_BPF_FUNC_ARGS; i++) {
+		struct bpf_global_trampoline *global_tr;
+
+		global_tr = &global_tr_array[i];
+		global_tr->nr_args = i;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+		global_tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
+		if (!global_tr->fops)
+			return -ENOMEM;
+
+		global_tr->fops->private = global_tr;
+		global_tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
+#endif
+#ifdef CONFIG_ARCH_HAS_BPF_GLOBAL_CALLER
+		global_tr->image = bpf_gloabl_caller_array[i];
+#endif
+	}
+
 	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
 		INIT_HLIST_HEAD(&trampoline_table[i]);
 	return 0;
-- 
2.39.5
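
A minimal usage sketch of the new API, not part of the patch: it assumes a
caller has already resolved its target addresses and per-function argument
counts. The helper example_gtramp_attach() and its parameters are
hypothetical; only struct bpf_gtramp_link, struct bpf_gtramp_link_entry and
bpf_gtrampoline_link_prog() come from this patch.

/* Hypothetical caller, for illustration only: builds a bpf_gtramp_link
 * from pre-resolved targets and attaches the prog to all of them via the
 * per-argument-count global trampolines.
 */
static int example_gtramp_attach(struct bpf_gtramp_link *glink,
				 struct bpf_prog *prog,
				 void **addrs, u32 *nr_args, u32 cnt)
{
	int err;
	u32 i;

	glink->link.prog = prog;
	glink->entry_cnt = cnt;
	glink->entries = kcalloc(cnt, sizeof(*glink->entries), GFP_KERNEL);
	if (!glink->entries)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		glink->entries[i].addr = addrs[i];
		glink->entries[i].nr_args = nr_args[i];
	}

	/* takes global_tr_lock and updates the global trampolines; fails
	 * with -EOPNOTSUPP if any target's argument count has no global
	 * trampoline image.
	 */
	err = bpf_gtrampoline_link_prog(glink);
	if (err)
		kfree(glink->entries);
	return err;
}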