Add a new bpf_dynptr_from_mem_slice kfunc to create a dynptr from a
PTR_TO_BTF_ID exposing a variable-length slice of memory, represented by
the new bpf_mem_slice type. This slice is read-only; for a read-write
slice, we can expose a distinct type in the future.

Since this is the first kfunc with potential local dynptr
initialization, add it to the if-else list in check_kfunc_args.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
 include/linux/bpf.h   |  6 ++++++
 kernel/bpf/helpers.c  | 37 +++++++++++++++++++++++++++++++++++++
 kernel/bpf/verifier.c |  6 +++++-
 3 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3f0cc89c0622..b0ea0b71df90 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1344,6 +1344,12 @@ enum bpf_dynptr_type {
 	BPF_DYNPTR_TYPE_XDP,
 };
 
+struct bpf_mem_slice {
+	void *ptr;
+	u32 len;
+	u32 reserved;
+};
+
 int bpf_dynptr_check_size(u32 size);
 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 78cefb41266a..89ab3481378d 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2873,6 +2873,42 @@ __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off,
 	return 0;
 }
 
+/**
+ * bpf_dynptr_from_mem_slice - Create a dynptr from a bpf_mem_slice
+ * @mem_slice: Source bpf_mem_slice, backing the underlying memory for the dynptr
+ * @flags: Flags for dynptr construction; currently, no flags are supported
+ * @dptr__uninit: Destination dynptr, which will be initialized
+ *
+ * Creates a dynptr that points to variable-length read-only memory represented
+ * by a bpf_mem_slice fat pointer.
+ * Returns 0 on success; a negative error otherwise.
+ */
+__bpf_kfunc int bpf_dynptr_from_mem_slice(struct bpf_mem_slice *mem_slice, u64 flags, struct bpf_dynptr *dptr__uninit)
+{
+	struct bpf_dynptr_kern *dptr = (struct bpf_dynptr_kern *)dptr__uninit;
+	int err;
+
+	/* mem_slice is never NULL, as we use KF_TRUSTED_ARGS. */
+	err = bpf_dynptr_check_size(mem_slice->len);
+	if (err)
+		goto error;
+
+	/* flags is currently unsupported */
+	if (flags) {
+		err = -EINVAL;
+		goto error;
+	}
+
+	bpf_dynptr_init(dptr, mem_slice->ptr, BPF_DYNPTR_TYPE_LOCAL, 0, mem_slice->len);
+	bpf_dynptr_set_rdonly(dptr);
+
+	return 0;
+
+error:
+	bpf_dynptr_set_null(dptr);
+	return err;
+}
+
 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
 {
 	return obj;
@@ -3327,6 +3363,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
 BTF_ID_FLAGS(func, bpf_dynptr_size)
 BTF_ID_FLAGS(func, bpf_dynptr_clone)
 BTF_ID_FLAGS(func, bpf_dynptr_copy)
+BTF_ID_FLAGS(func, bpf_dynptr_from_mem_slice, KF_TRUSTED_ARGS)
 #ifdef CONFIG_NET
 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
 #endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 99aa2c890e7b..ff34e68c9237 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12116,6 +12116,7 @@ enum special_kfunc_type {
 	KF_bpf_res_spin_unlock,
 	KF_bpf_res_spin_lock_irqsave,
 	KF_bpf_res_spin_unlock_irqrestore,
+	KF_bpf_dynptr_from_mem_slice,
 };
 
 BTF_SET_START(special_kfunc_set)
@@ -12219,6 +12220,7 @@ BTF_ID(func, bpf_res_spin_lock)
 BTF_ID(func, bpf_res_spin_unlock)
 BTF_ID(func, bpf_res_spin_lock_irqsave)
 BTF_ID(func, bpf_res_spin_unlock_irqrestore)
+BTF_ID(func, bpf_dynptr_from_mem_slice)
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
@@ -13140,7 +13142,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 		if (is_kfunc_arg_uninit(btf, &args[i]))
 			dynptr_arg_type |= MEM_UNINIT;
 
-		if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
+		if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_mem_slice]) {
+			dynptr_arg_type |= DYNPTR_TYPE_LOCAL;
+		} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
 			dynptr_arg_type |= DYNPTR_TYPE_SKB;
 		} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) {
 			dynptr_arg_type |= DYNPTR_TYPE_XDP;
-- 
2.47.1
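
For context, here is a rough sketch of how a BPF program could consume the new
kfunc. It is not part of the patch: the struct_ops hook name and the way the
program receives a trusted struct bpf_mem_slice pointer are invented for
illustration, and vmlinux.h is assumed to be generated from a kernel with this
change applied; only bpf_dynptr_from_mem_slice() itself and the existing
bpf_dynptr_read() helper are real.

/* Usage sketch only, not part of this patch. The hook and the source of the
 * trusted struct bpf_mem_slice pointer are hypothetical.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* kfunc added by this patch */
extern int bpf_dynptr_from_mem_slice(struct bpf_mem_slice *mem_slice, u64 flags,
				     struct bpf_dynptr *dptr__uninit) __ksym;

SEC("struct_ops/hypothetical_hook")
int BPF_PROG(read_slice, struct bpf_mem_slice *slice)
{
	struct bpf_dynptr dptr;
	u8 buf[16];

	/* Build a read-only local dynptr over the slice memory; fails if
	 * flags are non-zero or the slice length exceeds the dynptr limit.
	 */
	if (bpf_dynptr_from_mem_slice(slice, 0, &dptr))
		return 0;

	/* Bounds-checked read through the dynptr; fails if the slice is
	 * shorter than sizeof(buf). Writes are rejected since the dynptr
	 * is read-only.
	 */
	if (bpf_dynptr_read(buf, sizeof(buf), &dptr, 0, 0))
		return 0;

	return 0;
}

char _license[] SEC("license") = "GPL";

Because the kfunc is KF_TRUSTED_ARGS, the verifier only accepts a trusted,
non-NULL bpf_mem_slice pointer, which is why the sketch takes it directly from
the hook argument.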