slab: Remove folio references from __ksize()

In the future, we will separate slab, folio and page from each other,
and calling virt_to_folio() on an address allocated from slab will
return NULL.  Delay the conversion from struct page to struct slab
until we know we're not dealing with a large kmalloc allocation.
There's a minor win for large kmalloc allocations as we avoid the
compound_head() hidden in virt_to_folio().
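
For reference, virt_to_folio() is roughly the following (simplified
sketch of the current helper in include/linux/mm.h, not part of this
patch); the page_folio() step is where compound_head() hides:

	/* Simplified sketch, not part of this patch: */
	static inline struct folio *virt_to_folio(const void *x)
	{
		struct page *page = virt_to_page(x);

		return page_folio(page);	/* compound_head() lookup */
	}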

This deprecates calling ksize() on memory allocated by alloc_pages().
For now it triggers a warning; support will be removed entirely in
the future.
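
The pattern being deprecated looks like this (illustration only; the
function name is made up for the example):

	static size_t deprecated_ksize_example(void)
	{
		/* ksize() on memory from alloc_pages(), not kmalloc().
		 * With this patch it trips the WARN_ON(!slab) in
		 * __ksize() and still returns page_size(); eventually
		 * it will be unsupported entirely. */
		struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
		size_t sz = ksize(page_address(page));

		__free_pages(page, 2);
		return sz;
	}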

Introduce large_kmalloc_size() to abstract how we represent the size
of a large kmalloc allocation.  For now, this is the same as
page_size(), but it will change with separately allocated memdescs.
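
Worked example (illustration only; assumes 4KiB pages and the usual
8KiB KMALLOC_MAX_CACHE_SIZE): kmalloc(64 * 1024) is served by an
order-4 compound page, so large_kmalloc_order() reads 4 and
large_kmalloc_size() returns 64KiB, which page_size() also reports
today:

	void *p = kmalloc(SZ_64K, GFP_KERNEL);
	struct page *page = virt_to_page(p);

	WARN_ON(large_kmalloc_order(page) != 4);
	WARN_ON(large_kmalloc_size(page) != SZ_64K); /* == page_size(page) */
	kfree(p);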

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20251113000932.1589073-3-willy@infradead.org
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
commit ee1ee8abc4 (parent 2bcd3800f2)
Author:     Matthew Wilcox (Oracle)
AuthorDate: 2025-11-13 00:09:16 +00:00
Committer:  Vlastimil Babka

 3 files changed, 23 insertions(+), 12 deletions(-)

--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h

@@ -1064,7 +1064,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
  * Serialized with zone lock.
  */
 PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
-FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
 
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs

--- a/mm/slab.h
+++ b/mm/slab.h

@@ -605,6 +605,16 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	return s->size;
 }
 
+static inline unsigned int large_kmalloc_order(const struct page *page)
+{
+	return page[1].flags.f & 0xff;
+}
+
+static inline size_t large_kmalloc_size(const struct page *page)
+{
+	return PAGE_SIZE << large_kmalloc_order(page);
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 void dump_unreclaimable_slab(void);
 #else

--- a/mm/slab_common.c
+++ b/mm/slab_common.c

@@ -997,26 +997,27 @@ void __init create_kmalloc_caches(void)
  */
 size_t __ksize(const void *object)
 {
-	struct folio *folio;
+	const struct page *page;
+	const struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
 
-	folio = virt_to_folio(object);
+	page = virt_to_page(object);
 
-	if (unlikely(!folio_test_slab(folio))) {
-		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
-			return 0;
-		if (WARN_ON(object != folio_address(folio)))
-			return 0;
-		return folio_size(folio);
-	}
+	if (unlikely(PageLargeKmalloc(page)))
+		return large_kmalloc_size(page);
+
+	slab = page_slab(page);
+	/* Delete this after we're sure there are no users */
+	if (WARN_ON(!slab))
+		return page_size(page);
 
 #ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+	skip_orig_size_check(slab->slab_cache, object);
 #endif
 
-	return slab_ksize(folio_slab(folio)->slab_cache);
+	return slab_ksize(slab->slab_cache);
 }
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
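
Taken together, the three outcomes of the rewritten __ksize()
(illustrative sketch, not part of the patch; the function name is
made up):

	static void ksize_cases_example(void)
	{
		void *small = kmalloc(16, GFP_KERNEL);
		void *large = kmalloc(SZ_64K, GFP_KERNEL);
		struct page *raw = alloc_pages(GFP_KERNEL, 0);

		ksize(small);	/* slab: slab_ksize(slab->slab_cache) */
		ksize(large);	/* PageLargeKmalloc: large_kmalloc_size() */
		ksize(page_address(raw)); /* deprecated: WARN, page_size() */

		kfree(small);
		kfree(large);
		__free_pages(raw, 0);
	}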