Begin reporting may_goto timeouts to the BPF program's stderr stream. Make
sure that we don't end up spamming too many errors if the program keeps
failing repeatedly and filling up the stream, hence emit at most 512 error
messages from the kernel for a given program.

Acked-by: Eduard Zingerman <eddyz87@xxxxxxxxx>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
 include/linux/bpf.h | 21 ++++++++++++++-------
 kernel/bpf/core.c   | 19 ++++++++++++++++++-
 kernel/bpf/stream.c |  5 +++++
 3 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index aab5ea17a329..3449a31e9f66 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1682,6 +1682,7 @@ struct bpf_prog_aux {
 		struct rcu_head rcu;
 	};
 	struct bpf_stream stream[2];
+	atomic_t stream_error_cnt;
 };
 
 struct bpf_prog {
@@ -3604,6 +3605,8 @@ void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
 int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs);
 void bpf_put_buffers(void);
 
+#define BPF_PROG_STREAM_ERROR_CNT 512
+
 void bpf_prog_stream_init(struct bpf_prog *prog);
 void bpf_prog_stream_free(struct bpf_prog *prog);
 int bpf_prog_stream_read(struct bpf_prog *prog, enum bpf_stream_id stream_id, void __user *buf, int len);
@@ -3615,16 +3618,20 @@ int bpf_stream_stage_commit(struct bpf_stream_stage *ss, struct bpf_prog *prog,
 			    enum bpf_stream_id stream_id);
 int bpf_stream_stage_dump_stack(struct bpf_stream_stage *ss);
 
+bool bpf_prog_stream_error_limit(struct bpf_prog *prog);
+
 #define bpf_stream_printk(...) bpf_stream_stage_printk(&__ss, __VA_ARGS__)
 #define bpf_stream_dump_stack() bpf_stream_stage_dump_stack(&__ss)
 
-#define bpf_stream_stage(prog, stream_id, expr)                           \
-	({                                                                \
-		struct bpf_stream_stage __ss;                             \
-		bpf_stream_stage_init(&__ss);                             \
-		(expr);                                                   \
-		bpf_stream_stage_commit(&__ss, prog, stream_id);          \
-		bpf_stream_stage_free(&__ss);                             \
+#define bpf_stream_stage(prog, stream_id, expr)                           \
+	({                                                                \
+		struct bpf_stream_stage __ss;                             \
+		if (!bpf_prog_stream_error_limit(prog)) {                 \
+			bpf_stream_stage_init(&__ss);                     \
+			(expr);                                           \
+			bpf_stream_stage_commit(&__ss, prog, stream_id);  \
+			bpf_stream_stage_free(&__ss);                     \
+		}                                                         \
 	})
 
 #ifdef CONFIG_BPF_LSM
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 959538f91c60..8e74562e4114 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -3160,6 +3160,21 @@ u64 __weak arch_bpf_timed_may_goto(void)
 	return 0;
 }
 
+static noinline void bpf_prog_report_may_goto_violation(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+	struct bpf_prog *prog;
+
+	prog = bpf_prog_find_from_stack();
+	if (!prog)
+		return;
+	bpf_stream_stage(prog, BPF_STDERR, ({
+		bpf_stream_printk("ERROR: Timeout detected for may_goto instruction\n");
+		bpf_stream_dump_stack();
+	}));
+#endif
+}
+
 u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
 {
 	u64 time = ktime_get_mono_fast_ns();
@@ -3170,8 +3185,10 @@ u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
 		return BPF_MAX_TIMED_LOOPS;
 	}
 	/* Check if we've exhausted our time slice, and zero count. */
-	if (time - p->timestamp >= (NSEC_PER_SEC / 4))
+	if (unlikely(time - p->timestamp >= (NSEC_PER_SEC / 4))) {
+		bpf_prog_report_may_goto_violation();
 		return 0;
+	}
 	/* Refresh the count for the stack frame. */
 	return BPF_MAX_TIMED_LOOPS;
 }
diff --git a/kernel/bpf/stream.c b/kernel/bpf/stream.c
index cebd596671cd..c72b9480008c 100644
--- a/kernel/bpf/stream.c
+++ b/kernel/bpf/stream.c
@@ -537,3 +537,8 @@ int bpf_stream_stage_dump_stack(struct bpf_stream_stage *ss)
 	ret = ret ?: ctx.err;
 	return ret ?: bpf_stream_stage_printk(ss, "\n");
 }
+
+bool bpf_prog_stream_error_limit(struct bpf_prog *prog)
+{
+	return atomic_fetch_add(1, &prog->aux->stream_error_cnt) >= BPF_PROG_STREAM_ERROR_CNT;
+}
-- 
2.47.1
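
P.S. For context, a minimal reproducer sketch that exercises this path. This
is not part of the patch: it is a selftest-style program assuming the
can_loop macro from the selftests' bpf_misc.h (which lowers to a may_goto
instruction) and the "syscall" attach type; names like spin() are made up
for illustration.

	// SPDX-License-Identifier: GPL-2.0
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include "bpf_misc.h"	/* assumed source of can_loop (may_goto) */

	char _license[] SEC("license") = "GPL";

	SEC("syscall")
	int spin(void *ctx)
	{
		/* Busy-loop. Once the 0.25s time slice checked in
		 * bpf_check_timed_may_goto() expires, may_goto returns a
		 * zero count, the loop exits, and the kernel emits the
		 * timeout report (at most BPF_PROG_STREAM_ERROR_CNT times
		 * per program) to this program's BPF_STDERR stream.
		 */
		while (can_loop)
			;
		return 0;
	}

The report should then be readable from user space through the stream read
interface, e.g. (assuming the bpftool support added with the BPF streams
series, with <ID> standing in for the program id):

	bpftool prog tracelog stderr id <ID>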