Re: [PATCH nf-next,v2 1/2] netfilter: nft_set_pipapo: prevent overflow in lookup table allocation

On Thu, 10 Apr 2025 12:04:55 +0200
Pablo Neira Ayuso <pablo@xxxxxxxxxxxxx> wrote:

> When calculating the lookup table size, ensure the following
> multiplication does not overflow:
> 
> - desc->field_len[] has a maximum value of U8_MAX, multiplied by
>   NFT_PIPAPO_GROUPS_PER_BYTE(f), which is 2 in the worst case.
> - NFT_PIPAPO_BUCKETS(f->bb) is 2^8 in the worst case.
> - sizeof(unsigned long), from sizeof(*f->lt), lt in
>   struct nft_pipapo_field.
> 
> Then, use check_mul_overflow() to multiply by the bucket size, and
> check_add_overflow() to add the alignment headroom for AVX2 (if
> needed). Finally, add the lt_size_check_overflow() helper and use it
> to consolidate this logic.
> 
> While at it, replace a leftover allocation using GFP_KERNEL with
> GFP_KERNEL_ACCOUNT for consistency, in pipapo_resize().
> 
> Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
> Signed-off-by: Pablo Neira Ayuso <pablo@xxxxxxxxxxxxx>
> ---
> v2: detach chunk to ensure allocation below INT_MAX.
> 
>  net/netfilter/nft_set_pipapo.c | 48 +++++++++++++++++++++++++---------
>  1 file changed, 35 insertions(+), 13 deletions(-)
> 
> diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
> index 7be342b495f5..1b5c498468c5 100644
> --- a/net/netfilter/nft_set_pipapo.c
> +++ b/net/netfilter/nft_set_pipapo.c
> @@ -683,6 +683,22 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f,
>  	return 0;
>  }
>  
> +static bool lt_size_check_overflow(unsigned int groups, unsigned int bb,
> +				   unsigned int bsize, size_t size,
> +				   size_t *lt_size)

It looks good to me, except for one consideration: I found it quite
confusing that this helper modifies lt_size while, at first glance, it
appears to only check things, so I wondered for a while whether you had
accidentally dropped the alignment headroom.

By the way, 'size' is always sizeof(long) (because 'bsize' is in
longs), so it doesn't really need to be a parameter.

I haven't tested this (not even build tested, sorry), but what about:

/**
 * lt_calculate_size() - Get storage size for lookup table with overflow check
 * @groups:	Amount of bit groups
 * @bb:		Number of bits grouped together in lookup table buckets
 * @bsize:	Size of each bucket in lookup table, in longs
 *
 * Return: allocation size including alignment overhead, negative on overflow
 */
static ssize_t lt_calculate_size(unsigned int groups, unsigned int bb,
				 unsigned int bsize)
{
	ssize_t ret = groups * NFT_PIPAPO_BUCKETS(bb) * sizeof(long);

	if (check_mul_overflow(ret, bsize, &ret))
		return -1;
	if (check_add_overflow(ret, NFT_PIPAPO_ALIGN_HEADROOM, &ret))
		return -1;
	if (ret > INT_MAX)
		return -1;

	return ret;
}

?
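For the callers, this would then read something like (equally untested,
and 'lt_size' becomes a ssize_t there, so this is just a sketch of the
idea, taking pipapo_resize() as example):

	ssize_t lt_size;

	[...]

	lt_size = lt_calculate_size(f->groups, f->bb, new_bucket_size);
	if (lt_size < 0)
		return -ENOMEM;

	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
	if (!new_lt)
		return -ENOMEM;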

> +{
> +	*lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * size;
> +
> +	if (check_mul_overflow(*lt_size, bsize, lt_size))
> +		return true;
> +	if (check_add_overflow(*lt_size, NFT_PIPAPO_ALIGN_HEADROOM, lt_size))
> +		return true;
> +	if (*lt_size > INT_MAX)
> +		return true;
> +
> +	return false;
> +}
> +
>  /**
>   * pipapo_resize() - Resize lookup or mapping table, or both
>   * @f:		Field containing lookup and mapping tables
> @@ -701,6 +717,7 @@ static int pipapo_resize(struct nft_pipapo_field *f,
>  	long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
>  	unsigned int new_bucket_size, copy;
>  	int group, bucket, err;
> +	size_t lt_size;
>  
>  	if (rules >= NFT_PIPAPO_RULE0_MAX)
>  		return -ENOSPC;
> @@ -719,10 +736,11 @@ static int pipapo_resize(struct nft_pipapo_field *f,
>  	else
>  		copy = new_bucket_size;
>  
> -	new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
> -			  new_bucket_size * sizeof(*new_lt) +
> -			  NFT_PIPAPO_ALIGN_HEADROOM,
> -			  GFP_KERNEL);
> +	if (lt_size_check_overflow(f->groups, f->bb, new_bucket_size,
> +				   sizeof(*new_lt), &lt_size))
> +		return -ENOMEM;
> +
> +	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
>  	if (!new_lt)
>  		return -ENOMEM;
>  
> @@ -917,15 +935,17 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
>  		groups = f->groups * 2;
>  		bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
>  
> -		lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
> -			  sizeof(*f->lt);
> +		if (lt_size_check_overflow(groups, bb, f->bsize,
> +					   sizeof(*f->lt), &lt_size))
> +			return;
>  	} else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
>  		   lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
>  		groups = f->groups / 2;
>  		bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
>  
> -		lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
> -			  sizeof(*f->lt);
> +		if (lt_size_check_overflow(groups, bb, f->bsize,
> +					   sizeof(*f->lt), &lt_size))
> +			return;
>  
>  		/* Don't increase group width if the resulting lookup table size
>  		 * would exceed the upper size threshold for a "small" set.
> @@ -936,7 +956,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
>  		return;
>  	}
>  
> -	new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
> +	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
>  	if (!new_lt)
>  		return;
>  
> @@ -1451,13 +1471,15 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
>  
>  	for (i = 0; i < old->field_count; i++) {
>  		unsigned long *new_lt;
> +		size_t lt_size;
>  
>  		memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
>  
> -		new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
> -				  src->bsize * sizeof(*dst->lt) +
> -				  NFT_PIPAPO_ALIGN_HEADROOM,
> -				  GFP_KERNEL_ACCOUNT);
> +		if (lt_size_check_overflow(src->groups, src->bb, src->bsize,
> +					   sizeof(*dst->lt), &lt_size))
> +			goto out_lt;
> +
> +		new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
>  		if (!new_lt)
>  			goto out_lt;
>  

The rest looks good to me, thanks for fixing this!

-- 
Stefano