Add a new bpf_dynptr_from_mem_slice kfunc to create a dynptr from a
PTR_TO_BTF_ID exposing a variable-length slice of memory, represented
by the new bpf_mem_slice type. This slice is read-only; a read-write
slice can be exposed as a distinct type in the future. We rely on the
previous commits, which ensure that the source objects underpinning
dynptr memory are tracked correctly for invalidation, so that when a
PTR_TO_BTF_ID holding a memory slice goes away, its corresponding
dynptrs are invalidated.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
 include/linux/bpf.h   |  5 +++++
 kernel/bpf/helpers.c  | 32 ++++++++++++++++++++++++++++++++
 kernel/bpf/verifier.c |  6 +++++-
 3 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3f0cc89c0622..9feaa9bbf0a4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1344,6 +1344,11 @@ enum bpf_dynptr_type {
 	BPF_DYNPTR_TYPE_XDP,
 };
 
+struct bpf_mem_slice {
+	void *ptr;
+	size_t len;
+};
+
 int bpf_dynptr_check_size(u32 size);
 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index e3a2662f4e33..95e9c9df6062 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2826,6 +2826,37 @@ __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off,
 	return 0;
 }
 
+/**
+ * XXX
+ */
+__bpf_kfunc int bpf_dynptr_from_mem_slice(struct bpf_mem_slice *mem_slice, u64 flags, struct bpf_dynptr *dptr__uninit)
+{
+	struct bpf_dynptr_kern *dptr = (struct bpf_dynptr_kern *)dptr__uninit;
+	int err;
+
+	if (!mem_slice)
+		return -EINVAL;
+
+	err = bpf_dynptr_check_size(mem_slice->len);
+	if (err)
+		goto error;
+
+	/* flags is currently unsupported */
+	if (flags) {
+		err = -EINVAL;
+		goto error;
+	}
+
+	bpf_dynptr_init(dptr, mem_slice->ptr, BPF_DYNPTR_TYPE_LOCAL, 0, mem_slice->len);
+	bpf_dynptr_set_rdonly(dptr);
+
+	return 0;
+
+error:
+	bpf_dynptr_set_null(dptr);
+	return err;
+}
+
 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
 {
 	return obj;
@@ -3275,6 +3306,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
 BTF_ID_FLAGS(func, bpf_dynptr_size)
 BTF_ID_FLAGS(func, bpf_dynptr_clone)
 BTF_ID_FLAGS(func, bpf_dynptr_copy)
+BTF_ID_FLAGS(func, bpf_dynptr_from_mem_slice, KF_TRUSTED_ARGS)
 #ifdef CONFIG_NET
 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
 #endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 7e09c4592038..26aa70cd5734 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12125,6 +12125,7 @@ enum special_kfunc_type {
 	KF_bpf_res_spin_unlock,
 	KF_bpf_res_spin_lock_irqsave,
 	KF_bpf_res_spin_unlock_irqrestore,
+	KF_bpf_dynptr_from_mem_slice,
 };
 
 BTF_SET_START(special_kfunc_set)
@@ -12218,6 +12219,7 @@ BTF_ID(func, bpf_res_spin_lock)
 BTF_ID(func, bpf_res_spin_unlock)
 BTF_ID(func, bpf_res_spin_lock_irqsave)
 BTF_ID(func, bpf_res_spin_unlock_irqrestore)
+BTF_ID(func, bpf_dynptr_from_mem_slice)
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
@@ -13139,7 +13141,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 		}
 	}
 
-	if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
+	if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_mem_slice]) {
+		dynptr_arg_type |= DYNPTR_TYPE_LOCAL;
+	} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
 		dynptr_arg_type |= DYNPTR_TYPE_SKB;
 	} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) {
 		dynptr_arg_type |= DYNPTR_TYPE_XDP;

-- 
2.47.1
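
For reviewers, a minimal usage sketch from the BPF program side (not part
of this patch): it assumes the program is handed a trusted
struct bpf_mem_slice * argument by some hypothetical struct_ops callback
("test_cb" below is made up for illustration), and that the kfunc is
declared via the usual __ksym extern convention.

	/* Hypothetical sketch only; callback and section names are invented. */
	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	/* kfunc added by this patch, KF_TRUSTED_ARGS: slice must be trusted */
	extern int bpf_dynptr_from_mem_slice(struct bpf_mem_slice *mem_slice, u64 flags,
					     struct bpf_dynptr *dptr__uninit) __ksym;

	char _license[] SEC("license") = "GPL";

	SEC("struct_ops/test_cb")	/* hypothetical attach point passing a slice */
	int BPF_PROG(test_cb, struct bpf_mem_slice *slice)
	{
		struct bpf_dynptr dptr;
		char buf[16] = {};

		/* flags must be 0; on failure the dynptr is set to null */
		if (bpf_dynptr_from_mem_slice(slice, 0, &dptr))
			return 0;

		/* resulting dynptr is a read-only local dynptr over slice->ptr/len */
		bpf_dynptr_read(buf, sizeof(buf), &dptr, 0, 0);
		return 0;
	}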