Mirror of https://github.com/torvalds/linux.git, synced 2025-12-07 20:06:24 +00:00.
bpf: Convert cgroup sockaddr filters to use sockaddr_unsized consistently
Update BPF cgroup sockaddr filtering infrastructure to use sockaddr_unsized consistently throughout the call chain, removing redundant explicit casts from callers. No binary changes expected. Signed-off-by: Kees Cook <kees@kernel.org> Link: https://patch.msgid.link/20251104002617.2752303-6-kees@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit (8116d803e7), committed by Jakub Kicinski, has parent 449f68f8ff.
@@ -120,7 +120,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
|
||||
enum cgroup_bpf_attach_type atype);
|
||||
|
||||
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
|
||||
struct sockaddr *uaddr,
|
||||
struct sockaddr_unsized *uaddr,
|
||||
int *uaddrlen,
|
||||
enum cgroup_bpf_attach_type atype,
|
||||
void *t_ctx,
|
||||
@@ -238,8 +238,9 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
|
||||
({ \
|
||||
int __ret = 0; \
|
||||
if (cgroup_bpf_enabled(atype)) \
|
||||
__ret = __cgroup_bpf_run_filter_sock_addr(sk, (struct sockaddr *)uaddr, uaddrlen, \
|
||||
atype, NULL, NULL); \
|
||||
__ret = __cgroup_bpf_run_filter_sock_addr(sk, \
|
||||
(struct sockaddr_unsized *)uaddr, uaddrlen, \
|
||||
atype, NULL, NULL); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
@@ -248,8 +249,9 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
|
||||
int __ret = 0; \
|
||||
if (cgroup_bpf_enabled(atype)) { \
|
||||
lock_sock(sk); \
|
||||
__ret = __cgroup_bpf_run_filter_sock_addr(sk, (struct sockaddr *)uaddr, uaddrlen, \
|
||||
atype, t_ctx, NULL); \
|
||||
__ret = __cgroup_bpf_run_filter_sock_addr(sk, \
|
||||
(struct sockaddr_unsized *)uaddr, uaddrlen, \
|
||||
atype, t_ctx, NULL); \
|
||||
release_sock(sk); \
|
||||
} \
|
||||
__ret; \
|
||||
@@ -266,8 +268,9 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
|
||||
int __ret = 0; \
|
||||
if (cgroup_bpf_enabled(atype)) { \
|
||||
lock_sock(sk); \
|
||||
__ret = __cgroup_bpf_run_filter_sock_addr(sk, (struct sockaddr *)uaddr, uaddrlen, \
|
||||
atype, NULL, &__flags); \
|
||||
__ret = __cgroup_bpf_run_filter_sock_addr(sk, \
|
||||
(struct sockaddr_unsized *)uaddr, uaddrlen, \
|
||||
atype, NULL, &__flags); \
|
||||
release_sock(sk); \
|
||||
if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
|
||||
*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
|
||||
|
||||
@@ -1665,7 +1665,7 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
|
||||
* returned value != 1 during execution. In all other cases, 0 is returned.
|
||||
*/
|
||||
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
|
||||
struct sockaddr *uaddr,
|
||||
struct sockaddr_unsized *uaddr,
|
||||
int *uaddrlen,
|
||||
enum cgroup_bpf_attach_type atype,
|
||||
void *t_ctx,
|
||||
@@ -1673,7 +1673,7 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
|
||||
{
|
||||
struct bpf_sock_addr_kern ctx = {
|
||||
.sk = sk,
|
||||
.uaddr = uaddr,
|
||||
.uaddr = (struct sockaddr *)uaddr,
|
||||
.t_ctx = t_ctx,
|
||||
};
|
||||
struct sockaddr_storage unspec;
|
||||
|
||||
@@ -834,7 +834,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
|
||||
}
|
||||
sin->sin_port = inet->inet_dport;
|
||||
sin->sin_addr.s_addr = inet->inet_daddr;
|
||||
BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
|
||||
BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
|
||||
CGROUP_INET4_GETPEERNAME);
|
||||
} else {
|
||||
__be32 addr = inet->inet_rcv_saddr;
|
||||
@@ -842,7 +842,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
|
||||
addr = inet->inet_saddr;
|
||||
sin->sin_port = inet->inet_sport;
|
||||
sin->sin_addr.s_addr = addr;
|
||||
BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
|
||||
BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
|
||||
CGROUP_INET4_GETSOCKNAME);
|
||||
}
|
||||
release_sock(sk);
|
||||
|
||||
Reference in New Issue
Block a user