We will decouple sockets from the global protocol memory accounting
if they have SK_BPF_MEMCG_SOCK_ISOLATED set.

The flag can be set (and cleared) at the BPF_CGROUP_INET_SOCK_CREATE
hook by bpf_setsockopt() and is inherited by child sockets:

  u32 flags = SK_BPF_MEMCG_SOCK_ISOLATED;

  bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_MEMCG_FLAGS,
                 &flags, sizeof(flags));

SK_BPF_MEMCG_FLAGS is only supported at BPF_CGROUP_INET_SOCK_CREATE
and is not supported on other hooks, for the following reasons:

  1. UDP charges memory under sk->sk_receive_queue.lock instead
     of lock_sock()

  2. For TCP child sockets, memory accounting is adjusted only in
     __inet_accept(), to which sk->sk_memcg allocation is deferred

  3. Modifying the flag after skbs have been charged to sk would
     require the same adjustment during bpf_setsockopt() and would
     complicate the logic unnecessarily

We can support other hooks later if a real use case justifies it.
A complete example of the snippet above is sketched below.
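For illustration only, a full program using the option at the hook
could look like the following sketch. The program and file names are
hypothetical, not part of this patch; SEC() follows libbpf
conventions, and the SK_BPF_* definitions are assumed to come from
the updated uapi header via vmlinux.h:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  char LICENSE[] SEC("license") = "GPL";

  /* Hypothetical sketch: isolate every socket created in this
   * cgroup from the global protocol memory accounting.
   */
  SEC("cgroup/sock_create")
  int sock_create_isolate(struct bpf_sock *ctx)
  {
          u32 flags = SK_BPF_MEMCG_SOCK_ISOLATED;

          /* Routed to sk_bpf_set_get_memcg_flags() in the diff
           * below; returns 0 on success.
           */
          bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_MEMCG_FLAGS,
                         &flags, sizeof(flags));

          return 1;       /* allow socket creation */
  }

Once the program is attached with BPF_CGROUP_INET_SOCK_CREATE, every
socket created in the cgroup carries the flag, and TCP child sockets
inherit it in sk_clone_lock().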
Since sk->sk_memcg can be accessed in the fast path, it would be
preferable to place the flag field in the same cache line as
sk->sk_memcg. However, struct sock does not have such a 1-byte hole.

Let's store the flag in the lowest bit of sk->sk_memcg and add a
helper to check the bit.

In the next patch, if mem_cgroup_sk_isolated() returns true, the
socket will not be charged to sk->sk_prot->memory_allocated.
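The low bit is available because mem_cgroup objects are allocated
with at least word alignment, so bit 0 of a valid pointer is always
zero. A standalone sketch of the masking, with hypothetical names
that are not taken from this patch:

  /* Hypothetical, self-contained illustration of low-bit pointer
   * tagging; FLAG_MASK plays the role of SK_BPF_MEMCG_FLAG_MASK.
   */
  #define FLAG_MASK 1UL

  static inline void *tag_ptr(void *ptr, unsigned long flag)
  {
          /* Word-aligned allocations leave bit 0 free for a flag. */
          return (void *)((unsigned long)ptr | (flag & FLAG_MASK));
  }

  static inline void *untag_ptr(void *tagged)
  {
          /* Mask the flag bit off to recover the real pointer. */
          return (void *)((unsigned long)tagged & ~FLAG_MASK);
  }

  static inline unsigned long tag_of(void *tagged)
  {
          return (unsigned long)tagged & FLAG_MASK;
  }

mem_cgroup_from_sk() and mem_cgroup_sk_get_flags() in the diff below
apply the same two masks to sk->sk_memcg.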
Signed-off-by: Kuniyuki Iwashima <kuniyu@xxxxxxxxxx>
---
v5:
  * Limit getsockopt() to BPF_CGROUP_INET_SOCK_CREATE

v4:
  * Only allow inet_create() to set flags
  * Inherit flags from listener to child in sk_clone_lock()
  * Support clearing flags

v3:
  * Allow setting flags without sk->sk_memcg in sk_bpf_set_get_memcg_flags()
  * Preserve flags in __inet_accept()

v2:
  * s/mem_cgroup_sk_set_flag/mem_cgroup_sk_set_flags/ when CONFIG_MEMCG=n
  * Use CONFIG_CGROUP_BPF instead of CONFIG_BPF_SYSCALL for ifdef
---
 include/net/sock.h             | 50 ++++++++++++++++++++++++++++++++++
 include/uapi/linux/bpf.h       |  6 ++++
 net/core/filter.c              | 34 +++++++++++++++++++++++
 net/core/sock.c                |  1 +
 net/ipv4/af_inet.c             |  4 +++
 tools/include/uapi/linux/bpf.h |  6 ++++
 6 files changed, 101 insertions(+)

diff --git a/include/net/sock.h b/include/net/sock.h
index 63a6a48afb48..703cb9116c6e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2596,10 +2596,41 @@ static inline gfp_t gfp_memcg_charge(void)
 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
+#define SK_BPF_MEMCG_FLAG_MASK	(SK_BPF_MEMCG_FLAG_MAX - 1)
+#define SK_BPF_MEMCG_PTR_MASK	~SK_BPF_MEMCG_FLAG_MASK
+
 #ifdef CONFIG_MEMCG
 static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
 {
+#ifdef CONFIG_CGROUP_BPF
+	unsigned long val = (unsigned long)sk->sk_memcg;
+
+	val &= SK_BPF_MEMCG_PTR_MASK;
+	return (struct mem_cgroup *)val;
+#else
 	return sk->sk_memcg;
+#endif
+}
+
+static inline void mem_cgroup_sk_set_flags(struct sock *sk, unsigned short flags)
+{
+#ifdef CONFIG_CGROUP_BPF
+	unsigned long val = (unsigned long)mem_cgroup_from_sk(sk);
+
+	val |= flags;
+	sk->sk_memcg = (struct mem_cgroup *)val;
+#endif
+}
+
+static inline unsigned short mem_cgroup_sk_get_flags(const struct sock *sk)
+{
+#ifdef CONFIG_CGROUP_BPF
+	unsigned long val = (unsigned long)sk->sk_memcg;
+
+	return val & SK_BPF_MEMCG_FLAG_MASK;
+#else
+	return 0;
+#endif
 }
 
 static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
@@ -2607,6 +2638,11 @@ static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
 	return mem_cgroup_sockets_enabled && mem_cgroup_from_sk(sk);
 }
 
+static inline bool mem_cgroup_sk_isolated(const struct sock *sk)
+{
+	return mem_cgroup_sk_get_flags(sk) & SK_BPF_MEMCG_SOCK_ISOLATED;
+}
+
 static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
@@ -2629,11 +2665,25 @@ static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
 	return NULL;
 }
 
+static inline void mem_cgroup_sk_set_flags(struct sock *sk, unsigned short flags)
+{
+}
+
+static inline unsigned short mem_cgroup_sk_get_flags(const struct sock *sk)
+{
+	return 0;
+}
+
 static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
 {
 	return false;
 }
 
+static inline bool mem_cgroup_sk_isolated(const struct sock *sk)
+{
+	return false;
+}
+
 static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
 {
 	return false;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 233de8677382..52b8c2278589 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -7182,6 +7182,7 @@ enum {
 	TCP_BPF_SYN_MAC		= 1007, /* Copy the MAC, IP[46], and TCP header */
 	TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
 	SK_BPF_CB_FLAGS		= 1009, /* Get or set sock ops flags in socket */
+	SK_BPF_MEMCG_FLAGS	= 1010, /* Get or Set flags saved in sk->sk_memcg */
 };
 
 enum {
@@ -7204,6 +7205,11 @@ enum {
 	 */
 };
 
+enum {
+	SK_BPF_MEMCG_SOCK_ISOLATED	= (1UL << 0),
+	SK_BPF_MEMCG_FLAG_MAX		= (1UL << 1),
+};
+
 struct bpf_perf_event_value {
 	__u64 counter;
 	__u64 enabled;
diff --git a/net/core/filter.c b/net/core/filter.c
index 31b259f02ee9..df2496120076 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5723,9 +5723,39 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
 	.arg5_type	= ARG_CONST_SIZE,
 };
 
+static int sk_bpf_set_get_memcg_flags(struct sock *sk,
+				      char *optval, int optlen,
+				      bool getopt)
+{
+	u32 flags;
+
+	if (optlen != sizeof(u32))
+		return -EINVAL;
+
+	if (!sk_has_account(sk))
+		return -EOPNOTSUPP;
+
+	if (getopt) {
+		*(u32 *)optval = mem_cgroup_sk_get_flags(sk);
+		return 0;
+	}
+
+	flags = *(u32 *)optval;
+	if (flags >= SK_BPF_MEMCG_FLAG_MAX)
+		return -EINVAL;
+
+	mem_cgroup_sk_set_flags(sk, flags);
+
+	return 0;
+}
+
 BPF_CALL_5(bpf_sock_create_setsockopt, struct sock *, sk, int, level,
 	   int, optname, char *, optval, int, optlen)
 {
+	if (IS_ENABLED(CONFIG_MEMCG) &&
+	    level == SOL_SOCKET && optname == SK_BPF_MEMCG_FLAGS)
+		return sk_bpf_set_get_memcg_flags(sk, optval, optlen, false);
+
 	return __bpf_setsockopt(sk, level, optname, optval, optlen);
 }
 
@@ -5743,6 +5773,10 @@ static const struct bpf_func_proto bpf_sock_create_setsockopt_proto = {
 BPF_CALL_5(bpf_sock_create_getsockopt, struct sock *, sk, int, level,
 	   int, optname, char *, optval, int, optlen)
 {
+	if (IS_ENABLED(CONFIG_MEMCG) &&
+	    level == SOL_SOCKET && optname == SK_BPF_MEMCG_FLAGS)
+		return sk_bpf_set_get_memcg_flags(sk, optval, optlen, true);
+
 	return __bpf_getsockopt(sk, level, optname, optval, optlen);
 }
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 8002ac6293dc..ae30d7d54498 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2515,6 +2515,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 #ifdef CONFIG_MEMCG
 	/* sk->sk_memcg will be populated at accept() time */
 	newsk->sk_memcg = NULL;
+	mem_cgroup_sk_set_flags(newsk, mem_cgroup_sk_get_flags(sk));
 #endif
 
 	cgroup_sk_clone(&newsk->sk_cgrp_data);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d42757f74c6e..9b62f1ae13ba 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -758,12 +758,16 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *new
 	    (!IS_ENABLED(CONFIG_IP_SCTP) ||
 	     sk_is_tcp(newsk) || sk_is_mptcp(newsk))) {
 		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
+		unsigned short flags;
 
+		flags = mem_cgroup_sk_get_flags(newsk);
 		mem_cgroup_sk_alloc(newsk);
 
 		if (mem_cgroup_from_sk(newsk)) {
 			int amt;
 
+			mem_cgroup_sk_set_flags(newsk, flags);
+
 			/* The socket has not been accepted yet, no need
 			 * to look at newsk->sk_wmem_queued.
 			 */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 233de8677382..52b8c2278589 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -7182,6 +7182,7 @@ enum {
 	TCP_BPF_SYN_MAC		= 1007, /* Copy the MAC, IP[46], and TCP header */
 	TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
 	SK_BPF_CB_FLAGS		= 1009, /* Get or set sock ops flags in socket */
+	SK_BPF_MEMCG_FLAGS	= 1010, /* Get or Set flags saved in sk->sk_memcg */
 };
 
 enum {
@@ -7204,6 +7205,11 @@ enum {
 	 */
 };
 
+enum {
+	SK_BPF_MEMCG_SOCK_ISOLATED	= (1UL << 0),
+	SK_BPF_MEMCG_FLAG_MAX		= (1UL << 1),
+};
+
 struct bpf_perf_event_value {
 	__u64 counter;
 	__u64 enabled;
-- 
2.51.0.338.gd7d06c2dae-goog