Prepare for the next two patches, which need to be able to choose either
GFP_USER or GFP_ATOMIC for calls to bpf_iter_udp_realloc_batch, by making
the memory allocation flags configurable.

Signed-off-by: Jordan Rife <jordan@xxxxxxxx>
---
 net/ipv4/udp.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d0bffcfa56d8..0ac31dec339a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -3395,7 +3395,7 @@ struct bpf_udp_iter_state {
 };
 
 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
-				      unsigned int new_batch_sz);
+				      unsigned int new_batch_sz, int flags);
 static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 {
 	struct bpf_udp_iter_state *iter = seq->private;
@@ -3471,7 +3471,8 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 		iter->st_bucket_done = true;
 		goto done;
 	}
-	if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2)) {
+	if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2,
+						    GFP_USER)) {
 		resized = true;
 		/* After allocating a larger batch, retry one more time to grab
 		 * the whole bucket.
@@ -3825,12 +3826,12 @@ DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
 		     struct udp_sock *udp_sk, uid_t uid, int bucket)
 
 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
-				      unsigned int new_batch_sz)
+				      unsigned int new_batch_sz, int flags)
 {
 	struct sock **new_batch;
 
 	new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
-				   GFP_USER | __GFP_NOWARN);
+				   flags | __GFP_NOWARN);
 	if (!new_batch)
 		return -ENOMEM;
 
@@ -3853,7 +3854,7 @@ static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
 	if (ret)
 		return ret;
 
-	ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ);
+	ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ, GFP_USER);
 	if (ret)
 		bpf_iter_fini_seq_net(priv_data);

--
2.43.0
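
For context (not part of the patch above): with the flags parameter
threaded through, the existing sleepable call sites keep passing
GFP_USER, while a later caller that cannot sleep, e.g. one that regrows
the batch while still holding the bucket's spinlock, would pass
GFP_ATOMIC. A minimal sketch, assuming a hypothetical resize under
hslot2->lock:

	/* Sleepable context: the allocation may block. */
	err = bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2, GFP_USER);

	/* Hypothetical atomic context: the bucket lock is held, so the
	 * allocation must not sleep (with non-GFP_KERNEL flags,
	 * kvmalloc_array() skips the vmalloc fallback and may fail more
	 * readily, hence the retry logic around the resize).
	 */
	spin_lock_bh(&hslot2->lock);
	err = bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2, GFP_ATOMIC);
	spin_unlock_bh(&hslot2->lock);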