We will decouple sockets from the global protocol memory accounting
if they have SK_BPF_MEMCG_SOCK_ISOLATED set.

This can be flagged via bpf_setsockopt() during socket() or accept():

  flags = SK_BPF_MEMCG_SOCK_ISOLATED;
  bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_MEMCG_FLAGS,
                 &flags, sizeof(flags));

Given sk->sk_memcg is accessed in the fast path, it would be
preferable to place the flag field in the same cache line as
sk->sk_memcg.  However, struct sock does not have such a 1-byte hole.

Let's store the flag in the lowest bit of sk->sk_memcg and add a
helper to check the bit.

In the next patch, if mem_cgroup_sk_isolated() returns true, the
socket will not be charged to sk->sk_prot->memory_allocated.

Note that we do not support other hooks because UDP charges memory
under sk->sk_receive_queue.lock instead of lock_sock().

Signed-off-by: Kuniyuki Iwashima <kuniyu@xxxxxxxxxx>
---
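A usage sketch for reviewers, not part of the patch: the program below
would mark every new socket in the attached cgroup as isolated at
socket() time.  The program and section names are illustrative, it
assumes bpf_setsockopt() is usable from the socket() hook as described
above, and it needs the updated UAPI header in the include path:

  #include <linux/bpf.h>
  #include <sys/socket.h>
  #include <bpf/bpf_helpers.h>

  SEC("cgroup/sock_create")
  int sock_create_isolate(struct bpf_sock *ctx)
  {
  	int flags = SK_BPF_MEMCG_SOCK_ISOLATED;

  	/* bpf_setsockopt() fails with a negative error (e.g.
  	 * -EOPNOTSUPP when memcg is unavailable); here we choose
  	 * not to block socket creation on failure.
  	 */
  	bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_MEMCG_FLAGS,
  		       &flags, sizeof(flags));

  	return 1; /* allow the socket */
  }

  char _license[] SEC("license") = "GPL";

Such a program could then be attached with, e.g.:

  bpftool cgroup attach /sys/fs/cgroup/<cgrp> sock_create pinned <prog>
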
 include/net/sock.h             | 48 ++++++++++++++++++++++++++++++++++
 include/uapi/linux/bpf.h       |  6 +++++
 net/core/filter.c              | 32 ++++++++++++++++++++++-
 tools/include/uapi/linux/bpf.h |  6 +++++
 4 files changed, 91 insertions(+), 1 deletion(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 63a6a48afb48..fb33a7af7c9a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2596,10 +2596,39 @@ static inline gfp_t gfp_memcg_charge(void)
 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
+#define SK_BPF_MEMCG_FLAG_MASK	(SK_BPF_MEMCG_FLAG_MAX - 1)
+#define SK_BPF_MEMCG_PTR_MASK	~SK_BPF_MEMCG_FLAG_MASK
+
 #ifdef CONFIG_MEMCG
+static inline void mem_cgroup_sk_set_flags(struct sock *sk, unsigned short flags)
+{
+	unsigned long val = (unsigned long)sk->sk_memcg;
+
+	val |= flags;
+	sk->sk_memcg = (struct mem_cgroup *)val;
+}
+
+static inline unsigned short mem_cgroup_sk_get_flags(const struct sock *sk)
+{
+#ifdef CONFIG_BPF_SYSCALL
+	unsigned long val = (unsigned long)sk->sk_memcg;
+
+	return val & SK_BPF_MEMCG_FLAG_MASK;
+#else
+	return 0;
+#endif
+}
+
 static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
 {
+#ifdef CONFIG_BPF_SYSCALL
+	unsigned long val = (unsigned long)sk->sk_memcg;
+
+	val &= SK_BPF_MEMCG_PTR_MASK;
+	return (struct mem_cgroup *)val;
+#else
 	return sk->sk_memcg;
+#endif
 }
 
 static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
@@ -2607,6 +2636,11 @@ static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
 	return mem_cgroup_sockets_enabled && mem_cgroup_from_sk(sk);
 }
 
+static inline bool mem_cgroup_sk_isolated(const struct sock *sk)
+{
+	return mem_cgroup_sk_get_flags(sk) & SK_BPF_MEMCG_SOCK_ISOLATED;
+}
+
 static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
@@ -2624,6 +2658,15 @@ static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
 	return false;
 }
 #else
+static inline void mem_cgroup_sk_set_flags(struct sock *sk, unsigned short flags)
+{
+}
+
+static inline unsigned short mem_cgroup_sk_get_flags(const struct sock *sk)
+{
+	return 0;
+}
+
 static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
 {
 	return NULL;
@@ -2634,6 +2677,11 @@ static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
 	return false;
 }
 
+static inline bool mem_cgroup_sk_isolated(const struct sock *sk)
+{
+	return false;
+}
+
 static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
 {
 	return false;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 80df246d4741..9657496e0f3c 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -7183,6 +7183,7 @@ enum {
 	TCP_BPF_SYN_MAC         = 1007, /* Copy the MAC, IP[46], and TCP header */
 	TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
 	SK_BPF_CB_FLAGS		= 1009, /* Get or set sock ops flags in socket */
+	SK_BPF_MEMCG_FLAGS	= 1010, /* Get or Set flags saved in sk->sk_memcg */
 };
 
 enum {
@@ -7205,6 +7206,11 @@ enum {
 	 */
 };
 
+enum {
+	SK_BPF_MEMCG_SOCK_ISOLATED	= (1UL << 0),
+	SK_BPF_MEMCG_FLAG_MAX		= (1UL << 1),
+};
+
 struct bpf_perf_event_value {
 	__u64 counter;
 	__u64 enabled;
diff --git a/net/core/filter.c b/net/core/filter.c
index aa17c7ed5aed..d8a9f73095fb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5267,6 +5267,31 @@ static int sk_bpf_set_get_cb_flags(struct sock *sk, char *optval, bool getopt)
 	return 0;
 }
 
+static int sk_bpf_set_get_memcg_flags(struct sock *sk, int *optval, bool getopt)
+{
+	if (!mem_cgroup_sk_enabled(sk))
+		return -EOPNOTSUPP;
+
+	if (getopt) {
+		*optval = mem_cgroup_sk_get_flags(sk);
+		return 0;
+	}
+
+	/* Don't allow once sk has been published to userspace.
+	 * INET_CREATE is called without lock_sock() but with sk_socket.
+	 * INET_ACCEPT is called with lock_sock() but without sk_socket.
+	 */
+	if (sock_owned_by_user_nocheck(sk) && sk->sk_socket)
+		return -EBUSY;
+
+	if (*optval <= 0 || *optval >= SK_BPF_MEMCG_FLAG_MAX)
+		return -EINVAL;
+
+	mem_cgroup_sk_set_flags(sk, *optval);
+
+	return 0;
+}
+
 static int sol_socket_sockopt(struct sock *sk, int optname,
 			      char *optval, int *optlen,
 			      bool getopt)
@@ -5284,6 +5309,7 @@ static int sol_socket_sockopt(struct sock *sk, int optname,
 	case SO_BINDTOIFINDEX:
 	case SO_TXREHASH:
 	case SK_BPF_CB_FLAGS:
+	case SK_BPF_MEMCG_FLAGS:
 		if (*optlen != sizeof(int))
 			return -EINVAL;
 		break;
@@ -5293,8 +5319,12 @@ static int sol_socket_sockopt(struct sock *sk, int optname,
 		return -EINVAL;
 	}
 
-	if (optname == SK_BPF_CB_FLAGS)
+	switch (optname) {
+	case SK_BPF_CB_FLAGS:
 		return sk_bpf_set_get_cb_flags(sk, optval, getopt);
+	case SK_BPF_MEMCG_FLAGS:
+		return sk_bpf_set_get_memcg_flags(sk, (int *)optval, getopt);
+	}
 
 	if (getopt) {
 		if (optname == SO_BINDTODEVICE)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 80df246d4741..9657496e0f3c 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -7183,6 +7183,7 @@ enum {
 	TCP_BPF_SYN_MAC         = 1007, /* Copy the MAC, IP[46], and TCP header */
 	TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
 	SK_BPF_CB_FLAGS		= 1009, /* Get or set sock ops flags in socket */
+	SK_BPF_MEMCG_FLAGS	= 1010, /* Get or Set flags saved in sk->sk_memcg */
 };
 
 enum {
@@ -7205,6 +7206,11 @@ enum {
 	 */
 };
 
+enum {
+	SK_BPF_MEMCG_SOCK_ISOLATED	= (1UL << 0),
+	SK_BPF_MEMCG_FLAG_MAX		= (1UL << 1),
+};
+
 struct bpf_perf_event_value {
 	__u64 counter;
 	__u64 enabled;
-- 
2.51.0.rc2.233.g662b1ed5c5-goog