From: Zijian Zhang <zijianzhang@xxxxxxxxxxxxx>

The redirect ingress path allocates and frees sk_msg structures at a
high rate. Introduce a dedicated kmem_cache for sk_msg to reduce memory
allocation overhead and improve performance.

Reviewed-by: Cong Wang <cong.wang@xxxxxxxxxxxxx>
Signed-off-by: Zijian Zhang <zijianzhang@xxxxxxxxxxxxx>
---
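A note for reviewers: the sketch below shows the kmem_cache pattern this
patch assumes. The cache name, flags, and init hook here are
illustrative guesses, not the patch itself; the real cache creation and
the sk_msg_alloc() definition are in net/core/skmsg.c (see diffstat),
which this excerpt does not include in full.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skmsg.h>

struct kmem_cache *sk_msg_cachep;

/* Hypothetical init hook for illustration; the real patch wires the
 * cache creation up in net/core/skmsg.c.
 */
static int __init sk_msg_cachep_init(void)
{
	sk_msg_cachep = kmem_cache_create("sk_msg",
					  sizeof(struct sk_msg),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	return sk_msg_cachep ? 0 : -ENOMEM;
}
core_initcall(sk_msg_cachep_init);

/* Zeroed allocation from the dedicated cache, replacing a plain
 * kzalloc(sizeof(struct sk_msg), gfp). The matching free is
 * kmem_cache_free(sk_msg_cachep, msg), as kfree_sk_msg() below does.
 */
struct sk_msg *sk_msg_alloc(gfp_t gfp)
{
	return kmem_cache_zalloc(sk_msg_cachep, gfp);
}

The benefit over kzalloc()/kfree() is that fixed-size sk_msg objects
come from a per-CPU slab fast path instead of the general-purpose
allocator, which matters when redirect ingress churns through them.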
 include/linux/skmsg.h | 21 ++++++++++++---------
 net/core/skmsg.c      | 28 +++++++++++++++++++++-------
 net/ipv4/tcp_bpf.c    |  5 ++---
 3 files changed, 35 insertions(+), 19 deletions(-)

diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index d6f0a8cd73c4..bf28ce9b5fdb 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -121,6 +121,7 @@ struct sk_psock {
 	struct rcu_work			rwork;
 };
 
+struct sk_msg *sk_msg_alloc(gfp_t gfp);
 int sk_msg_expand(struct sock *sk, struct sk_msg *msg, int len,
 		  int elem_first_coalesce);
 int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
@@ -143,6 +144,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 		   int len, int flags);
 bool sk_msg_is_readable(struct sock *sk);
 
+extern struct kmem_cache *sk_msg_cachep;
+
 static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
 {
 	WARN_ON(i == msg->sg.end && bytes);
@@ -319,6 +322,13 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
+static inline void kfree_sk_msg(struct sk_msg *msg)
+{
+	if (msg->skb)
+		consume_skb(msg->skb);
+	kmem_cache_free(sk_msg_cachep, msg);
+}
+
 static inline bool sk_psock_queue_msg(struct sk_psock *psock,
 				      struct sk_msg *msg)
 {
@@ -330,7 +340,7 @@ static inline bool sk_psock_queue_msg(struct sk_psock *psock,
 		ret = true;
 	} else {
 		sk_msg_free(psock->sk, msg);
-		kfree(msg);
+		kfree_sk_msg(msg);