memcg: Convert mem_cgroup_from_obj_folio() to mem_cgroup_from_obj_slab()
In preparation for splitting struct slab from struct page and struct
folio, convert the pointer to a slab rather than a folio. This means we
can end up passing a NULL slab pointer to mem_cgroup_from_obj_slab() if
the pointer is not to a page allocated to slab, and we handle that
appropriately by returning NULL.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: cgroups@vger.kernel.org
Link: https://patch.msgid.link/20251113000932.1589073-15-willy@infradead.org
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
committed by Vlastimil Babka
parent 5934b1be8d
commit b8557d109e
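The heart of the change is the new lookup order: resolve the pointer to a slab first, and fall back to the folio only when it is not a slab object. A minimal sketch of that flow, using the helpers from the diff below (the wrapper name lookup_memcg_for_obj is hypothetical, introduced only for illustration):

/*
 * Sketch of the post-patch flow: virt_to_slab() returns NULL when the
 * pointer does not belong to a slab page, so the folio path is only
 * taken for generic kernel pages.  (Wrapper name is hypothetical.)
 */
static struct mem_cgroup *lookup_memcg_for_obj(void *p)
{
	struct slab *slab = virt_to_slab(p);

	if (slab)
		/* Slab object: memcg data lives per-object in slab->obj_exts. */
		return mem_cgroup_from_obj_slab(slab, p);

	/* Not a slab object: use the per-folio memcg data. */
	return folio_memcg_check(virt_to_folio(p));
}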
mm/memcontrol.c
@@ -2557,38 +2557,25 @@ static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
 }
 
 static __always_inline
-struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
+struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
 {
 	/*
 	 * Slab objects are accounted individually, not per-page.
 	 * Memcg membership data for each individual object is saved in
 	 * slab->obj_exts.
 	 */
-	if (folio_test_slab(folio)) {
-		struct slabobj_ext *obj_exts;
-		struct slab *slab;
-		unsigned int off;
-
-		slab = folio_slab(folio);
-		obj_exts = slab_obj_exts(slab);
-		if (!obj_exts)
-			return NULL;
-
-		off = obj_to_index(slab->slab_cache, slab, p);
-		if (obj_exts[off].objcg)
-			return obj_cgroup_memcg(obj_exts[off].objcg);
-
-		return NULL;
-	}
-
-	/*
-	 * folio_memcg_check() is used here, because in theory we can encounter
-	 * a folio where the slab flag has been cleared already, but
-	 * slab->obj_exts has not been freed yet
-	 * folio_memcg_check() will guarantee that a proper memory
-	 * cgroup pointer or NULL will be returned.
-	 */
-	return folio_memcg_check(folio);
+	struct slabobj_ext *obj_exts;
+	unsigned int off;
+
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts)
+		return NULL;
+
+	off = obj_to_index(slab->slab_cache, slab, p);
+	if (obj_exts[off].objcg)
+		return obj_cgroup_memcg(obj_exts[off].objcg);
+
+	return NULL;
 }
 
 /*
@@ -2602,10 +2589,15 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
  */
 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
 {
+	struct slab *slab;
+
 	if (mem_cgroup_disabled())
 		return NULL;
 
-	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
+	slab = virt_to_slab(p);
+	if (slab)
+		return mem_cgroup_from_obj_slab(slab, p);
+	return folio_memcg_check(virt_to_folio(p));
 }
 
 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
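For illustration, a hedged usage sketch (the caller below is hypothetical): mem_cgroup_from_slab_obj() returns the memcg pointer without taking a reference, and its documentation in mm/memcontrol.c (the comment block elided between the two hunks above) asks callers to ensure the memcg lifetime, e.g. by taking rcu_read_lock():

/* Hypothetical caller: report which memcg a slab object is charged to. */
static void report_obj_memcg(void *obj)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();	/* keeps the returned memcg pointer valid */
	memcg = mem_cgroup_from_slab_obj(obj);
	if (memcg)
		pr_debug("%p charged to memcg %p\n", obj, memcg);
	rcu_read_unlock();
}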