slab: Remove folio references from free_large_kmalloc()

There's no need to use folio APIs here; just use a page directly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20251113000932.1589073-7-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
Matthew Wilcox (Oracle)
2025-11-13 00:09:20 +00:00
committed by Vlastimil Babka
parent f9395bf5db
commit 0bdfdd6a05

View File

@@ -6742,12 +6742,12 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-static void free_large_kmalloc(struct folio *folio, void *object)
+static void free_large_kmalloc(struct page *page, void *object)
 {
-	unsigned int order = folio_order(folio);
+	unsigned int order = compound_order(page);
 
-	if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
-		dump_page(&folio->page, "Not a kmalloc allocation");
+	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
+		dump_page(page, "Not a kmalloc allocation");
 		return;
 	}
@@ -6758,10 +6758,10 @@ static void free_large_kmalloc(struct folio *folio, void *object)
 	kasan_kfree_large(object);
 	kmsan_kfree_large(object);
-	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
 			-(PAGE_SIZE << order));
-	__folio_clear_large_kmalloc(folio);
-	free_frozen_pages(&folio->page, order);
+	__ClearPageLargeKmalloc(page);
+	free_frozen_pages(page, order);
 }
 
 /*
@@ -6789,7 +6789,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
 	 * consider folio order
 	 */
 	obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
-	free_large_kmalloc(folio, obj);
+	free_large_kmalloc(&folio->page, obj);
 	return;
 }
@@ -6829,7 +6829,7 @@ void kfree(const void *object)
 	folio = virt_to_folio(object);
 	if (unlikely(!folio_test_slab(folio))) {
-		free_large_kmalloc(folio, (void *)object);
+		free_large_kmalloc(&folio->page, (void *)object);
 		return;
 	}
@@ -7253,7 +7253,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
-			free_large_kmalloc(folio, object);
+			free_large_kmalloc(&folio->page, object);
 			df->slab = NULL;
 			return size;
 		}