mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
bpf: Always charge/uncharge memory when allocating/unlinking storage elements
Since commit a96a44aba5 ("bpf: bpf_sk_storage: Fix invalid wait
context lockdep report"), {charge,uncharge}_mem are always true when
allocating a bpf_local_storage_elem or unlinking a bpf_local_storage_elem
from local storage, so drop these arguments. No functional change.
Signed-off-by: Amery Hung <ameryhung@gmail.com>
Reviewed-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20251114201329.3275875-2-ameryhung@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit (0e854e5535) was committed by Alexei Starovoitov; its parent commit is ec12ab2cda.
@@ -184,7 +184,7 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
|
||||
|
||||
struct bpf_local_storage_elem *
|
||||
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
|
||||
bool charge_mem, bool swap_uptrs, gfp_t gfp_flags);
|
||||
bool swap_uptrs, gfp_t gfp_flags);
|
||||
|
||||
void bpf_selem_free(struct bpf_local_storage_elem *selem,
|
||||
struct bpf_local_storage_map *smap,
|
||||
|
||||
@@ -73,11 +73,11 @@ static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
|
||||
|
||||
struct bpf_local_storage_elem *
|
||||
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
|
||||
void *value, bool charge_mem, bool swap_uptrs, gfp_t gfp_flags)
|
||||
void *value, bool swap_uptrs, gfp_t gfp_flags)
|
||||
{
|
||||
struct bpf_local_storage_elem *selem;
|
||||
|
||||
if (charge_mem && mem_charge(smap, owner, smap->elem_size))
|
||||
if (mem_charge(smap, owner, smap->elem_size))
|
||||
return NULL;
|
||||
|
||||
if (smap->bpf_ma) {
|
||||
@@ -106,8 +106,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
|
||||
return selem;
|
||||
}
|
||||
|
||||
if (charge_mem)
|
||||
mem_uncharge(smap, owner, smap->elem_size);
|
||||
mem_uncharge(smap, owner, smap->elem_size);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
@@ -284,7 +283,7 @@ static void bpf_selem_free_list(struct hlist_head *list, bool reuse_now)
|
||||
*/
|
||||
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
|
||||
struct bpf_local_storage_elem *selem,
|
||||
bool uncharge_mem, struct hlist_head *free_selem_list)
|
||||
struct hlist_head *free_selem_list)
|
||||
{
|
||||
struct bpf_local_storage_map *smap;
|
||||
bool free_local_storage;
|
||||
@@ -297,8 +296,7 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_stor
|
||||
* The owner may be freed once the last selem is unlinked
|
||||
* from local_storage.
|
||||
*/
|
||||
if (uncharge_mem)
|
||||
mem_uncharge(smap, owner, smap->elem_size);
|
||||
mem_uncharge(smap, owner, smap->elem_size);
|
||||
|
||||
free_local_storage = hlist_is_singular_node(&selem->snode,
|
||||
&local_storage->list);
|
||||
@@ -393,7 +391,7 @@ static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
|
||||
raw_spin_lock_irqsave(&local_storage->lock, flags);
|
||||
if (likely(selem_linked_to_storage(selem)))
|
||||
free_local_storage = bpf_selem_unlink_storage_nolock(
|
||||
local_storage, selem, true, &selem_free_list);
|
||||
local_storage, selem, &selem_free_list);
|
||||
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
|
||||
bpf_selem_free_list(&selem_free_list, reuse_now);
|
||||
@@ -582,7 +580,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
|
||||
selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags);
|
||||
selem = bpf_selem_alloc(smap, owner, value, swap_uptrs, gfp_flags);
|
||||
if (!selem)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@@ -616,7 +614,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
|
||||
/* A lookup has just been done before and concluded a new selem is
|
||||
* needed. The chance of an unnecessary alloc is unlikely.
|
||||
*/
|
||||
alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags);
|
||||
alloc_selem = selem = bpf_selem_alloc(smap, owner, value, swap_uptrs, gfp_flags);
|
||||
if (!alloc_selem)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@@ -656,7 +654,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
|
||||
if (old_sdata) {
|
||||
bpf_selem_unlink_map(SELEM(old_sdata));
|
||||
bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
|
||||
true, &old_selem_free_list);
|
||||
&old_selem_free_list);
|
||||
}
|
||||
|
||||
unlock:
|
||||
@@ -762,7 +760,7 @@ void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
|
||||
* of the loop will set the free_cgroup_storage to true.
|
||||
*/
|
||||
free_storage = bpf_selem_unlink_storage_nolock(
|
||||
local_storage, selem, true, &free_selem_list);
|
||||
local_storage, selem, &free_selem_list);
|
||||
}
|
||||
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
|
||||
|
||||
@@ -136,7 +136,7 @@ bpf_sk_storage_clone_elem(struct sock *newsk,
|
||||
{
|
||||
struct bpf_local_storage_elem *copy_selem;
|
||||
|
||||
copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, false, GFP_ATOMIC);
|
||||
copy_selem = bpf_selem_alloc(smap, newsk, NULL, false, GFP_ATOMIC);
|
||||
if (!copy_selem)
|
||||
return NULL;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user