There are some failure paths in bpf_int_jit_compile() that do not warrant
the warning in __bpf_prog_ret0_warn(). For example, if memory allocation
fails in bpf_int_jit_compile(), we should propagate -ENOMEM to userspace
instead of attaching __bpf_prog_ret0_warn().

Let's pass &err to bpf_int_jit_compile() so that the error code is
propagated to the caller.

Signed-off-by: Kuniyuki Iwashima <kuniyu@xxxxxxxxxx>
---
 arch/arc/net/bpf_jit_core.c      |  2 +-
 arch/arm/net/bpf_jit_32.c        |  2 +-
 arch/arm64/net/bpf_jit_comp.c    |  2 +-
 arch/loongarch/net/bpf_jit.c     |  2 +-
 arch/mips/net/bpf_jit_comp.c     |  2 +-
 arch/parisc/net/bpf_jit_core.c   |  2 +-
 arch/powerpc/net/bpf_jit_comp.c  |  2 +-
 arch/riscv/net/bpf_jit_core.c    |  2 +-
 arch/s390/net/bpf_jit_comp.c     |  2 +-
 arch/sparc/net/bpf_jit_comp_64.c |  2 +-
 arch/x86/net/bpf_jit_comp.c      |  2 +-
 arch/x86/net/bpf_jit_comp32.c    |  2 +-
 include/linux/filter.h           |  2 +-
 kernel/bpf/core.c                |  6 ++++--
 kernel/bpf/verifier.c            | 21 +++++++++++++++------
 15 files changed, 32 insertions(+), 21 deletions(-)

diff --git a/arch/arc/net/bpf_jit_core.c b/arch/arc/net/bpf_jit_core.c
index e3628922c24a..146bc0606f18 100644
--- a/arch/arc/net/bpf_jit_core.c
+++ b/arch/arc/net/bpf_jit_core.c
@@ -1411,7 +1411,7 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
  * (re)locations involved that their addresses are not known
  * during the first run.
  */
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	vm_dump(prog);

diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index deeb8f292454..81d6af62d47d 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -2142,7 +2142,7 @@ bool bpf_jit_needs_zext(void)
 	return true;
 }

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 70d7c89d3ac9..cf88f174a145 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1820,7 +1820,7 @@ struct arm64_jit_data {
 	struct jit_ctx ctx;
 };

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	int image_size, prog_size, extable_size, extable_align, extable_offset;
 	struct bpf_prog *tmp, *orig_prog = prog;
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index fa1500d4aa3e..437e5e1130a0 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1186,7 +1186,7 @@ static int validate_code(struct jit_ctx *ctx)
 	return 0;
 }

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	bool tmp_blinded = false, extra_pass = false;
 	u8 *image_ptr;
diff --git a/arch/mips/net/bpf_jit_comp.c b/arch/mips/net/bpf_jit_comp.c
index e355dfca4400..deb6bf7150bc 100644
--- a/arch/mips/net/bpf_jit_comp.c
+++ b/arch/mips/net/bpf_jit_comp.c
@@ -909,7 +909,7 @@ bool bpf_jit_needs_zext(void)
 	return true;
 }

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header = NULL;
diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
index 06cbcd6fe87b..0c74306cb392 100644
--- a/arch/parisc/net/bpf_jit_core.c
+++ b/arch/parisc/net/bpf_jit_core.c
@@ -41,7 +41,7 @@ bool bpf_jit_needs_zext(void)
 	return true;
 }

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	unsigned int prog_size = 0, extable_size = 0;
 	bool tmp_blinded = false, extra_pass = false;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2991bb171a9b..ede2462f3653 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -129,7 +129,7 @@ bool bpf_jit_needs_zext(void)
 	return true;
 }

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp, int *err)
 {
 	u32 proglen;
 	u32 alloclen;
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index f8cd2f70a7fb..11fa033ec666 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -42,7 +42,7 @@ bool bpf_jit_needs_zext(void)
 	return true;
 }

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	unsigned int prog_size = 0, extable_size = 0;
 	bool tmp_blinded = false, extra_pass = false;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0776dfde2dba..3d875ff21362 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2255,7 +2255,7 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
 /*
  * Compile eBPF program "fp"
  */
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp, int *err)
 {
 	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
 	struct bpf_prog *tmp, *orig_fp = fp;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 73bf0aea8baf..0e5aa8535a27 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1477,7 +1477,7 @@ struct sparc64_jit_data {
 	struct jit_ctx ctx;
 };

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct sparc64_jit_data *jit_data;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9e5fe2ba858f..313e68414486 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -3495,7 +3495,7 @@ struct x64_jit_data {
 #define MAX_PASSES 20
 #define PADDING_PASSES (MAX_PASSES - 5)

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	struct bpf_binary_header *rw_header = NULL;
 	struct bpf_binary_header *header = NULL;
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index de0f9e5f9f73..628a96c12091 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2518,7 +2518,7 @@ bool bpf_jit_needs_zext(void)
 	return true;
 }

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	struct bpf_binary_header *header = NULL;
 	struct bpf_prog *tmp, *orig_prog = prog;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f5cf4d35d83e..4652dc8d46a7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1124,7 +1124,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
 	 (void *)__bpf_call_base)

-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog, int *err);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_jit_needs_zext(void);
 bool bpf_jit_inlines_helper_call(s32 imm);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ba6b6118cf50..cbc973f9449f 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2491,8 +2491,10 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 		if (*err)
 			return fp;
-		fp = bpf_int_jit_compile(fp);
+		fp = bpf_int_jit_compile(fp, err);
 		bpf_prog_jit_attempt_done(fp);
+		if (*err)
+			return fp;
 		if (!fp->jited && jit_needed) {
 			*err = -ENOTSUPP;
 			return fp;
 		}
@@ -2999,7 +3001,7 @@ const struct bpf_func_proto bpf_tail_call_proto = {
  * It is encouraged to implement bpf_int_jit_compile() instead, so that
  * eBPF and implicitly also cBPF can get JITed!
  */
-struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog, int *err)
 {
 	return prog;
 }
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 54c6953a8b84..2e2956bacf4f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21074,10 +21074,11 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	if (err)
 		goto out_undo_insn;

-	err = -ENOMEM;
 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
-	if (!func)
+	if (!func) {
+		err = -ENOMEM;
 		goto out_undo_insn;
+	}

 	for (i = 0; i < env->subprog_cnt; i++) {
 		subprog_start = subprog_end;
@@ -21090,14 +21091,18 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		 * func[i]->stats will never be accessed and stays NULL
 		 */
 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
-		if (!func[i])
+		if (!func[i]) {
+			err = -ENOMEM;
 			goto out_free;
+		}
 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
 		       len * sizeof(struct bpf_insn));
 		func[i]->type = prog->type;
 		func[i]->len = len;
-		if (bpf_prog_calc_tag(func[i]))
+		if (bpf_prog_calc_tag(func[i])) {
+			err = -ENOMEM;
 			goto out_free;
+		}
 		func[i]->is_func = 1;
 		func[i]->sleepable = prog->sleepable;
 		func[i]->aux->func_idx = i;
@@ -21154,7 +21159,9 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		func[i]->aux->might_sleep = env->subprog_info[i].might_sleep;
 		if (!i)
 			func[i]->aux->exception_boundary = env->seen_exception;
-		func[i] = bpf_int_jit_compile(func[i]);
+		func[i] = bpf_int_jit_compile(func[i], &err);
+		if (err)
+			goto out_free;
 		if (!func[i]->jited) {
 			err = -ENOTSUPP;
 			goto out_free;
@@ -21198,7 +21205,9 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	}
 	for (i = 0; i < env->subprog_cnt; i++) {
 		old_bpf_func = func[i]->bpf_func;
-		tmp = bpf_int_jit_compile(func[i]);
+		tmp = bpf_int_jit_compile(func[i], &err);
+		if (err)
+			goto out_free;
 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
 			err = -ENOTSUPP;
-- 
2.49.0
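Not part of the patch: below is a minimal userspace sketch of the error-propagation
contract described in the commit message. The names fake_prog, fake_jit_compile and
select_runtime are hypothetical stand-ins, not kernel symbols; the point is only that
a hard JIT failure now reports a negative error code through *err, so the caller can
return e.g. -ENOMEM instead of folding every failure into -ENOTSUPP.

/*
 * Standalone model of the new bpf_int_jit_compile(prog, &err) contract.
 * All types and helpers below are made up for illustration.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_prog {
	bool jited;
	void *image;
};

/*
 * On a hard failure (here, allocation failure) the JIT leaves the program
 * untouched and reports a negative error code through *err, instead of
 * silently handing back an unjited program.
 */
static struct fake_prog *fake_jit_compile(struct fake_prog *prog, int *err)
{
	void *image = malloc(64);	/* stands in for the image allocation */

	if (!image) {
		*err = -ENOMEM;
		return prog;
	}

	prog->image = image;
	prog->jited = true;
	return prog;
}

/* Caller side, mirroring the checks added to bpf_prog_select_runtime(). */
static int select_runtime(struct fake_prog *prog, bool jit_needed)
{
	int err = 0;

	prog = fake_jit_compile(prog, &err);
	if (err)
		return err;		/* e.g. -ENOMEM reaches user space */
	if (!prog->jited && jit_needed)
		return -ENOTSUPP;	/* JIT genuinely unsupported */
	return 0;
}

int main(void)
{
	struct fake_prog prog = { false, NULL };

	printf("select_runtime() returned %d\n", select_runtime(&prog, true));
	free(prog.image);
	return 0;
}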