bpf: Consistently use bpf_rcu_lock_held() everywhere
We have many places that open-code what is now the bpf_rcu_lock_held() macro, so replace all of them with a clean and short macro invocation. For that, move the bpf_rcu_lock_held() macro into include/linux/bpf.h.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/bpf/20251014201403.4104511-1-andrii@kernel.org
Committed by Daniel Borkmann
Parent: 39e9d5f630
Commit: 48a97ffc6c
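To make the consolidation concrete, here is a minimal standalone C sketch of the before/after pattern at a call site. The RCU lockdep helpers and WARN_ON_ONCE() below are userspace stubs standing in for the kernel versions (an assumption for illustration only); only the macro body and the call-site change mirror the patch.

/* Standalone sketch; the helpers below are stubs, not the kernel's
 * RCU lockdep checks. */
#include <stdbool.h>
#include <stdio.h>

static bool rcu_read_lock_held(void)       { return true;  } /* stub */
static bool rcu_read_lock_trace_held(void) { return false; } /* stub */
static bool rcu_read_lock_bh_held(void)    { return false; } /* stub */

/* Stub for the kernel's WARN_ON_ONCE(); here it just prints when the
 * condition is true. */
#define WARN_ON_ONCE(cond) \
        do { if (cond) fprintf(stderr, "WARNING: %s\n", #cond); } while (0)

/* The macro the patch moves into include/linux/bpf.h. */
#define bpf_rcu_lock_held() \
        (rcu_read_lock_held() || rcu_read_lock_trace_held() || \
         rcu_read_lock_bh_held())

static void lookup_path(void)
{
        /* Before: each caller open-coded the three-way check. */
        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
                     !rcu_read_lock_bh_held());

        /* After: the same assertion through the shared macro. */
        WARN_ON_ONCE(!bpf_rcu_lock_held());
}

int main(void)
{
        lookup_path();
        return 0;
}

Because !(a || b || c) is equivalent to (!a && !b && !c), the macro-based assertion checks exactly the same condition as the open-coded version, so the patch is a pure cleanup with no behavioral change.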
include/linux/bpf.h
@@ -2381,6 +2381,9 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
 bool bpf_jit_bypass_spec_v1(void);
 bool bpf_jit_bypass_spec_v4(void);
 
+#define bpf_rcu_lock_held() \
+        (rcu_read_lock_held() || rcu_read_lock_trace_held() || rcu_read_lock_bh_held())
+
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
 extern struct mutex bpf_stats_enabled_mutex;
include/linux/bpf_local_storage.h
@@ -18,9 +18,6 @@
 
 #define BPF_LOCAL_STORAGE_CACHE_SIZE 16
 
-#define bpf_rcu_lock_held() \
-        (rcu_read_lock_held() || rcu_read_lock_trace_held() || \
-         rcu_read_lock_bh_held())
 struct bpf_local_storage_map_bucket {
         struct hlist_head list;
         raw_spinlock_t lock;
kernel/bpf/hashtab.c
@@ -657,8 +657,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
         struct htab_elem *l;
         u32 hash, key_size;
 
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
         key_size = map->key_size;
 
@@ -1086,8 +1085,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                 /* unknown flags */
                 return -EINVAL;
 
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
         key_size = map->key_size;
 
@@ -1194,8 +1192,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
                 /* unknown flags */
                 return -EINVAL;
 
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
         key_size = map->key_size;
 
@@ -1263,8 +1260,7 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
                 /* unknown flags */
                 return -EINVAL;
 
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
         key_size = map->key_size;
 
@@ -1326,8 +1322,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
                 /* unknown flags */
                 return -EINVAL;
 
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
         key_size = map->key_size;
 
@@ -1404,8 +1399,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
         u32 hash, key_size;
         int ret;
 
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
         key_size = map->key_size;
 
@@ -1440,8 +1434,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
         u32 hash, key_size;
         int ret;
 
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
         key_size = map->key_size;
 
kernel/bpf/helpers.c
@@ -42,8 +42,7 @@
  */
 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 {
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
         return (unsigned long) map->ops->map_lookup_elem(map, key);
 }
 
@@ -59,8 +58,7 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
            void *, value, u64, flags)
 {
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
         return map->ops->map_update_elem(map, key, value, flags);
 }
 
@@ -77,8 +75,7 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
 
 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
 {
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
         return map->ops->map_delete_elem(map, key);
 }
 
@@ -134,8 +131,7 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
 
 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
 {
-        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                     !rcu_read_lock_bh_held());
+        WARN_ON_ONCE(!bpf_rcu_lock_held());
         return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
 }
 