Re: [PATCH v3 bpf-next 1/2] bpf: Add range tracking for BPF_NEG

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Tue, 2025-06-24 at 16:33 -0700, Song Liu wrote:

[...]

> diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
> index fcea9819e359..799eccd181b5 100644
> --- a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
> +++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
> @@ -225,9 +225,7 @@ l2_%=:	r0 = 1;						\
>  
>  SEC("socket")
>  __description("map access: known scalar += value_ptr unknown vs unknown (lt)")
> -__success __failure_unpriv
> -__msg_unpriv("R1 tried to add from different maps, paths or scalars")
> -__retval(1)
> +__success __success_unpriv __retval(1)
>  __naked void ptr_unknown_vs_unknown_lt(void)
>  {
>  	asm volatile ("					\
> @@ -265,9 +263,7 @@ l2_%=:	r0 = 1;						\
>  
>  SEC("socket")
>  __description("map access: known scalar += value_ptr unknown vs unknown (gt)")
> -__success __failure_unpriv
> -__msg_unpriv("R1 tried to add from different maps, paths or scalars")
> -__retval(1)
> +__success __success_unpriv __retval(1)
>  __naked void ptr_unknown_vs_unknown_gt(void)
>  {
>  	asm volatile ("					\

Apologies for not being clear in previous messages.
Could you please avoid flipping these tests from __failure_unpriv to __success_unpriv?
Instead, the tests should be rewritten to conjure an unbound scalar
value in some different way — for example, as in the diff below:

diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
index fcea9819e359..3593b15d11af 100644
--- a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
@@ -231,6 +231,10 @@ __retval(1)
 __naked void ptr_unknown_vs_unknown_lt(void)
 {
        asm volatile ("                                 \
+       r8 = r1;                                        \
+       call %[bpf_get_prandom_u32];                    \
+       r9 = r0;                                        \
+       r1 = r8;                                        \
        r0 = *(u32*)(r1 + %[__sk_buff_len]);            \
        r1 = 0;                                         \
        *(u64*)(r10 - 8) = r1;                          \
@@ -244,12 +248,10 @@ l1_%=:    call %[bpf_map_lookup_elem];                    \
        if r0 == 0 goto l2_%=;                          \
        r4 = *(u8*)(r0 + 0);                            \
        if r4 == 1 goto l3_%=;                          \
-       r1 = 6;                                         \
-       r1 = -r1;                                       \
+       r1 = r9;                                        \
        r1 &= 0x3;                                      \
        goto l4_%=;                                     \
-l3_%=: r1 = 6;                                         \
-       r1 = -r1;                                       \
+l3_%=: r1 = r9;                                        \
        r1 &= 0x7;                                      \
 l4_%=: r1 += r0;                                       \
        r0 = *(u8*)(r1 + 0);                            \
@@ -259,7 +261,8 @@ l2_%=:      r0 = 1;                                         \
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_array_48b),
          __imm_addr(map_hash_16b),
-         __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+         __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+         __imm(bpf_get_prandom_u32)
        : __clobber_all);
 }





[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux