slab: Remove folio references in memcg_slab_post_charge()

This allows us to skip the compound_head() call hidden in virt_to_folio()
for large kmalloc objects: a large kmalloc object always starts at the
base of its compound allocation, so virt_to_page() already gives us the
head page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20251113000932.1589073-4-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
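
For context, virt_to_folio() is essentially virt_to_page() followed by
page_folio(), and page_folio() is where the compound_head() lookup hides.
A sketch of the upstream definition (from include/linux/mm.h; exact
details may differ by kernel version):

	static inline struct folio *virt_to_folio(const void *x)
	{
		struct page *page = virt_to_page(x);

		return page_folio(page);	/* compound_head() under the hood */
	}

Since a large kmalloc object begins at the base of its compound
allocation, virt_to_page(p) already returns the head page and the
page_folio() step is pure overhead on that path.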

@@ -2369,33 +2369,34 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
 {
 	struct slabobj_ext *slab_exts;
 	struct kmem_cache *s;
-	struct folio *folio;
+	struct page *page;
 	struct slab *slab;
 	unsigned long off;
 
-	folio = virt_to_folio(p);
-	if (!folio_test_slab(folio)) {
+	page = virt_to_page(p);
+	if (PageLargeKmalloc(page)) {
+		unsigned int order;
 		int size;
 
-		if (folio_memcg_kmem(folio))
+		if (PageMemcgKmem(page))
 			return true;
 
-		if (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
-					     folio_order(folio)))
+		order = large_kmalloc_order(page);
+		if (__memcg_kmem_charge_page(page, flags, order))
 			return false;
 
 		/*
-		 * This folio has already been accounted in the global stats but
+		 * This page has already been accounted in the global stats but
		 * not in the memcg stats. So, subtract from the global and use
		 * the interface which adds to both global and memcg stats.
 		 */
-		size = folio_size(folio);
-		node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size);
-		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size);
+		size = PAGE_SIZE << order;
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size);
+		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size);
 		return true;
 	}
 
-	slab = folio_slab(folio);
+	slab = page_slab(page);
 	s = slab->slab_cache;
 
 	/*
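
The stats fixup in the hunk is equivalent to the old folio-based version:
folio_size() for an order-N compound allocation is exactly PAGE_SIZE << N.
A minimal userspace sketch of the subtract-then-add dance (the counters
and helpers below are toy stand-ins, not kernel APIs; a 4 KiB PAGE_SIZE
is an assumption):

	#include <stdio.h>

	#define PAGE_SIZE 4096L			/* assumption: 4 KiB pages */

	/* Toy stand-ins for the node-wide and per-memcg counters. */
	static long node_stat, memcg_stat;

	/* Models mod_node_page_state(): touches the node counter only. */
	static void mod_node(long delta) { node_stat += delta; }

	/* Models mod_lruvec_page_state(): touches node and memcg counters. */
	static void mod_lruvec(long delta) { node_stat += delta; memcg_stat += delta; }

	int main(void)
	{
		unsigned int order = 3;			/* e.g. a 32 KiB large kmalloc */
		long size = PAGE_SIZE << order;		/* matches old folio_size() */

		node_stat = size;	/* already accounted globally at alloc time */

		mod_node(-size);	/* subtract from the global stats ... */
		mod_lruvec(size);	/* ... and re-add via the memcg-aware path */

		/* node counter is back to +size; memcg counter gained +size */
		printf("node=%ld memcg=%ld\n", node_stat, memcg_stat);
		return 0;
	}

The net node-level change is zero while the memcg counter picks up the
allocation, which is exactly what the comment in the hunk describes.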