Files
linux/include/net/proto_memory.h
Kuniyuki Iwashima 7c268eaeec net: Allow opt-out from global protocol memory accounting.
Some protocols (e.g., TCP, UDP) implement memory accounting for socket
buffers and charge memory to per-protocol global counters pointed to by
sk->sk_prot->memory_allocated.

Some system processes do not want that limitation.  For a similar
purpose, there is SO_RESERVE_MEM for sockets under memcg.
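
For reference, a minimal userspace sketch of SO_RESERVE_MEM, assuming a
socket fd and <sys/socket.h>; the byte count here is arbitrary:

	int reserve = 1 << 20;	/* ask to reserve ~1 MB for this socket */

	if (setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM,
		       &reserve, sizeof(reserve)))
		perror("setsockopt(SO_RESERVE_MEM)");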

Also, by opting out of the per-protocol accounting, sockets under memcg
can avoid paying costs for two orthogonal memory accounting mechanisms.
A microbenchmark result is in the subsequent bpf patch.

Let's allow opting out of the per-protocol memory accounting when
sk->sk_bypass_prot_mem is true.
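
A minimal sketch of the shape of that check in a charging path (the
actual opt-out sites live in the core socket charging code and are not
shown here):

	if (!sk->sk_bypass_prot_mem)
		sk_memory_allocated_add(sk, amt);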

sk->sk_bypass_prot_mem and sk->sk_prot are placed in the same cache
line, and sk_has_account() always fetches sk->sk_prot before
sk->sk_bypass_prot_mem is accessed, so this patch adds no extra cache
miss.
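
For reference, sk_has_account() (include/net/sock.h) reads:

	static inline bool sk_has_account(struct sock *sk)
	{
		/* return true if protocol supports memory accounting */
		return !!sk->sk_prot->memory_allocated;
	}

so the sk->sk_prot load is already on the path that the new flag check
shares.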

The following patches will set sk->sk_bypass_prot_mem to true, and the
per-protocol memory accounting will then be skipped.

Note that this does NOT disable the memcg accounting, only the
per-protocol one.

Another option, which would avoid using the hole in struct sock_common,
is to create sk_prot variants like tcp_prot_bypass, but this would
complicate the SOCKMAP logic, tcp_bpf_prots, etc.

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Link: https://patch.msgid.link/20251014235604.3057003-3-kuniyu@google.com
2025-10-16 12:04:47 -07:00

87 lines
1.9 KiB
C

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _PROTO_MEMORY_H
#define _PROTO_MEMORY_H

#include <net/sock.h>
#include <net/hotdata.h>

/* 1 MB per cpu, in page units */
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))

static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool
proto_memory_pressure(const struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!READ_ONCE(*prot->memory_pressure);
}

static inline bool sk_under_global_memory_pressure(const struct sock *sk)
{
	return proto_memory_pressure(sk->sk_prot);
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	/* memcg pressure is always honoured, even when the per-protocol
	 * accounting is bypassed.
	 */
	if (mem_cgroup_sk_enabled(sk) &&
	    mem_cgroup_sk_under_memory_pressure(sk))
		return true;

	/* Opted-out sockets ignore the global per-protocol pressure. */
	if (sk->sk_bypass_prot_mem)
		return false;

	return !!READ_ONCE(*sk->sk_prot->memory_pressure);
}

static inline long
proto_memory_allocated(const struct proto *prot)
{
	return max(0L, atomic_long_read(prot->memory_allocated));
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	return proto_memory_allocated(sk->sk_prot);
}

/* Flush this cpu's batched charges into the shared atomic counter. */
static inline void proto_memory_pcpu_drain(struct proto *proto)
{
	int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);

	if (val)
		atomic_long_add(val, proto->memory_allocated);
}

static inline void
sk_memory_allocated_add(const struct sock *sk, int val)
{
	struct proto *proto = sk->sk_prot;

	val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);

	/* Only touch the shared counter once the per-cpu cache has
	 * accumulated a full reserve's worth of pages.
	 */
	if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))
		proto_memory_pcpu_drain(proto);
}

static inline void
sk_memory_allocated_sub(const struct sock *sk, int val)
{
	struct proto *proto = sk->sk_prot;

	val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);

	if (unlikely(val <= -READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))
		proto_memory_pcpu_drain(proto);
}

#endif /* _PROTO_MEMORY_H */
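
Taken together, sk_memory_allocated_add()/_sub() batch charges in a
per-cpu cache and only hit the shared atomic counter when the cache
crosses +/- sysctl_mem_pcpu_rsv pages. A hedged sketch of a hypothetical
caller follows; example_charge() is illustrative only, as real charging
goes through the core socket accounting paths such as
__sk_mem_raise_allocated():

	/* Charge @pages to the protocol, backing off under pressure. */
	static bool example_charge(struct sock *sk, int pages)
	{
		sk_memory_allocated_add(sk, pages);

		if (sk_under_memory_pressure(sk)) {
			/* Undo the charge and let the caller retry later. */
			sk_memory_allocated_sub(sk, pages);
			return false;
		}
		return true;
	}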