On 8/28/25 6:00 PM, Kuniyuki Iwashima wrote:
+static inline bool sk_should_enter_memory_pressure(struct sock *sk)
+{
+ return !mem_cgroup_sk_enabled(sk) || !mem_cgroup_sk_isolated(sk);
+}
+
static inline long
proto_memory_allocated(const struct proto *prot)
{
@@ -3154,8 +3158,11 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
return true;
- sk_enter_memory_pressure(sk);
+ if (sk_should_enter_memory_pressure(sk))
+ sk_enter_memory_pressure(sk);
+
sk_stream_moderate_sndbuf(sk);
+
return false;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 71a956fbfc55..dcbd49e2f8af 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -908,7 +908,8 @@ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
}
__kfree_skb(skb);
} else {
- sk->sk_prot->enter_memory_pressure(sk);
+ if (sk_should_enter_memory_pressure(sk))
+ tcp_enter_memory_pressure(sk);
This change from sk->sk_prot->enter_memory_pressure() to tcp_enter_memory_pressure()
looks fine. A quick question / nit: have you considered checking
sk_should_enter_memory_pressure() inside tcp_enter_memory_pressure() /
sk_enter_memory_pressure() instead, so the call sites stay unchanged?
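Something along these lines is what I had in mind, just a rough, untested sketch
against the current sk_enter_memory_pressure() in net/core/sock.c, reusing the
sk_should_enter_memory_pressure() helper this patch adds:

	/* Bail out early when the socket's memcg is isolated, so callers
	 * can keep calling sk_enter_memory_pressure() unconditionally.
	 */
	void sk_enter_memory_pressure(struct sock *sk)
	{
		if (!sk_should_enter_memory_pressure(sk))
			return;

		if (!sk->sk_prot->enter_memory_pressure)
			return;

		sk->sk_prot->enter_memory_pressure(sk);
	}

tcp_enter_memory_pressure() could get the same early return.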
The other changes in patch 4 LGTM.
Shakeel, you have acked patch 1. Will you take a look at patches 3 and 4 as well?
sk_stream_moderate_sndbuf(sk);
}
@@ -1016,7 +1017,7 @@ static void mptcp_enter_memory_pressure(struct sock *sk)
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- if (first)
+ if (first && sk_should_enter_memory_pressure(ssk))
tcp_enter_memory_pressure(ssk);
sk_stream_moderate_sndbuf(ssk);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index f672a62a9a52..6696ef837116 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -35,6 +35,7 @@
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
+#include <net/proto_memory.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <linux/skbuff_ref.h>
@@ -371,7 +372,8 @@ static int tls_do_allocation(struct sock *sk,
if (!offload_ctx->open_record) {
if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
sk->sk_allocation))) {
- READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
+ if (sk_should_enter_memory_pressure(sk))
+ READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
return -ENOMEM;
}