mm/khugepaged: unify pmd folio installation with map_anon_folio_pmd()

Currently we install a pmd folio with map_anon_folio_pmd() in
__do_huge_pmd_anonymous_page() and do_huge_zero_wp_pmd(), while
collapse_huge_page() open-codes the same steps, differing only in which
statistics it adjusts.

Unify all three sites on map_anon_folio_pmd() to install the pmd folio.
Split it into map_anon_folio_pmd_pf() and map_anon_folio_pmd_nopf(), to be
used in the page-fault and non-page-fault paths respectively.

No functional change is intended.
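
As a condensed sketch of the resulting split (elided to the lines relevant
here; see the diff below for the verbatim change):

	/* Shared installation path, also usable outside of page faults. */
	void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
			struct vm_area_struct *vma, unsigned long haddr)
	{
		...
		set_pmd_at(vma->vm_mm, haddr, pmd, entry);
		update_mmu_cache_pmd(vma, haddr, pmd);
		deferred_split_folio(folio, false);
	}

	/* Page-fault wrapper: installation plus fault accounting. */
	static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
			struct vm_area_struct *vma, unsigned long haddr)
	{
		map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		count_vm_event(THP_FAULT_ALLOC);
		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}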

[akpm@linux-foundation.org: remove unneeded map_anon_folio_pmd_nopf() stub, per Wei & David]
Link: https://lkml.kernel.org/r/20251008095453.18772-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Lance Yang <lance.yang@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -533,6 +533,8 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmd, bool freeze);
 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
 			   pmd_t *pmdp, struct folio *folio);
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
+			     struct vm_area_struct *vma, unsigned long haddr);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1218,7 +1218,7 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
 	return folio;
 }
 
-static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
 		struct vm_area_struct *vma, unsigned long haddr)
 {
 	pmd_t entry;
@@ -1229,11 +1229,17 @@ static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
 	folio_add_lru_vma(folio, vma);
 	set_pmd_at(vma->vm_mm, haddr, pmd, entry);
 	update_mmu_cache_pmd(vma, haddr, pmd);
+	deferred_split_folio(folio, false);
+}
+
+static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
+		struct vm_area_struct *vma, unsigned long haddr)
+{
+	map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 	count_vm_event(THP_FAULT_ALLOC);
 	count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
 	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
-	deferred_split_folio(folio, false);
 }
 
 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
@@ -1272,7 +1278,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			return ret;
 		}
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
-		map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+		map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
 		mm_inc_nr_ptes(vma->vm_mm);
 		spin_unlock(vmf->ptl);
 	}
@@ -1944,7 +1950,7 @@ static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
 	if (ret)
 		goto release;
 	(void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
-	map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+	map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
 	goto unlock;
 release:
 	folio_put(folio);

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1226,17 +1226,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	__folio_mark_uptodate(folio);
 	pgtable = pmd_pgtable(_pmd);
 
-	_pmd = folio_mk_pmd(folio, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
-	folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
-	folio_add_lru_vma(folio, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
-	set_pmd_at(mm, address, pmd, _pmd);
-	update_mmu_cache_pmd(vma, address, pmd);
-	deferred_split_folio(folio, false);
+	map_anon_folio_pmd_nopf(folio, pmd, vma, address);
 	spin_unlock(pmd_ptl);
 	folio = NULL;