mm/filemap: Extend __filemap_get_folio() to support NUMA memory policies

Extend __filemap_get_folio() to support NUMA memory policies by
renaming the implementation to __filemap_get_folio_mpol() and adding
a mempolicy parameter. The original function becomes a static inline
wrapper that passes NULL for the mempolicy.

This infrastructure will enable future support for NUMA-aware page
cache allocations, e.g. for KVM guests whose memory is backed by
guest_memfd.
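
For illustration only (not part of this change), a future user such as
a guest_memfd backend might call the new helper roughly as sketched
below; the example function name and flag choices are hypothetical:

  #include <linux/pagemap.h>
  #include <linux/mempolicy.h>

  /* Hypothetical helper: look up or create a folio, honouring @policy. */
  static struct folio *example_get_folio(struct address_space *mapping,
                                         pgoff_t index,
                                         struct mempolicy *policy)
  {
          /*
           * A non-NULL @policy steers any folio allocated under FGP_CREAT;
           * passing NULL behaves exactly like __filemap_get_folio().
           */
          return __filemap_get_folio_mpol(mapping, index,
                                          FGP_LOCK | FGP_CREAT,
                                          mapping_gfp_mask(mapping), policy);
  }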

Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
Tested-by: Ashish Kalra <ashish.kalra@amd.com>
Link: https://lore.kernel.org/r/20250827175247.83322-5-shivankg@amd.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 2 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -755,11 +755,17 @@ static inline fgf_t fgf_set_order(size_t size)
 }
 
 void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
-struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
-		fgf_t fgp_flags, gfp_t gfp);
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+		pgoff_t index, fgf_t fgf_flags, gfp_t gfp, struct mempolicy *policy);
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
 		fgf_t fgp_flags, gfp_t gfp);
 
+static inline struct folio *__filemap_get_folio(struct address_space *mapping,
+		pgoff_t index, fgf_t fgf_flags, gfp_t gfp)
+{
+	return __filemap_get_folio_mpol(mapping, index, fgf_flags, gfp, NULL);
+}
+
 /**
  * write_begin_get_folio - Get folio for write_begin with flags.
  * @iocb: The kiocb passed from write_begin (may be NULL).

diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1928,11 +1928,12 @@ out:
 }
 
 /**
- * __filemap_get_folio - Find and get a reference to a folio.
+ * __filemap_get_folio_mpol - Find and get a reference to a folio.
  * @mapping: The address_space to search.
  * @index: The page index.
  * @fgp_flags: %FGP flags modify how the folio is returned.
  * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
+ * @policy: NUMA memory allocation policy to follow.
  *
  * Looks up the page cache entry at @mapping & @index.
  *
@@ -1943,8 +1944,8 @@ out:
  *
  * Return: The found folio or an ERR_PTR() otherwise.
  */
-struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
-		fgf_t fgp_flags, gfp_t gfp)
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+		pgoff_t index, fgf_t fgp_flags, gfp_t gfp, struct mempolicy *policy)
 {
 	struct folio *folio;
 
@@ -2014,7 +2015,7 @@ no_page:
 			err = -ENOMEM;
 			if (order > min_order)
 				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
-			folio = filemap_alloc_folio(alloc_gfp, order, NULL);
+			folio = filemap_alloc_folio(alloc_gfp, order, policy);
 			if (!folio)
 				continue;
 
@@ -2061,7 +2062,7 @@ no_page:
 		folio_clear_dropbehind(folio);
 	return folio;
 }
-EXPORT_SYMBOL(__filemap_get_folio);
+EXPORT_SYMBOL(__filemap_get_folio_mpol);
 
 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
 		xa_mark_t mark)