Clean up bounds checking for trace->nr in __bpf_get_stack by limiting it only to max_depth. Acked-by: Song Liu <song@xxxxxxxxxx> Cc: Song Liu <song@xxxxxxxxxx> Signed-off-by: Arnaud Lecomte <contact@xxxxxxxxxxxxxx> --- kernel/bpf/stackmap.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index a794e04f5ae9..9a86b5acac10 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -462,13 +462,15 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, if (may_fault) rcu_read_lock(); /* need RCU for perf's callchain below */ - if (trace_in) + if (trace_in) { trace = trace_in; - else if (kernel && task) + trace->nr = min_t(u32, trace->nr, max_depth); + } else if (kernel && task) { trace = get_callchain_entry_for_task(task, max_depth); - else + } else { trace = get_perf_callchain(regs, 0, kernel, user, max_depth, crosstask, false); + } if (unlikely(!trace) || trace->nr < skip) { if (may_fault) -- 2.43.0