mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
mm: huge_memory: use folio_can_map_prot_numa() for pmd folio
mm: huge_memory: use folio_can_map_prot_numa() for pmd folios. folio_can_map_prot_numa() checks whether a folio can be mapped prot-NUMA, skipping unsuitable folios: zone-device folios, shared folios (KSM, CoW), non-movable DMA-pinned folios, dirty file folios, and folios that already have the expected node affinity. Although the KSM check only applies to small folios, an extra test was added for large folios; the remaining policies should also be applied to PMD-mapped folios, which helps avoid unnecessary PMD changes and folio migration attempts. Link: https://lkml.kernel.org/r/20251023113737.3572790-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Acked-by: David Hildenbrand <david@redhat.com> Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Barry Song <baohua@kernel.org> Cc: Dev Jain <dev.jain@arm.com> Cc: Lance Yang <lance.yang@linux.dev> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Zi Yan <ziy@nvidia.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
ca43034cdb
commit
f66e2727dd
@@ -2396,8 +2396,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
|
||||
#endif
|
||||
|
||||
if (prot_numa) {
|
||||
struct folio *folio;
|
||||
bool toptier;
|
||||
|
||||
/*
|
||||
* Avoid trapping faults against the zero page. The read-only
|
||||
* data is likely to be read-cached on the local CPU and
|
||||
@@ -2409,19 +2408,9 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
|
||||
if (pmd_protnone(*pmd))
|
||||
goto unlock;
|
||||
|
||||
folio = pmd_folio(*pmd);
|
||||
toptier = node_is_toptier(folio_nid(folio));
|
||||
/*
|
||||
* Skip scanning top tier node if normal numa
|
||||
* balancing is disabled
|
||||
*/
|
||||
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
|
||||
toptier)
|
||||
if (!folio_can_map_prot_numa(pmd_folio(*pmd), vma,
|
||||
vma_is_single_threaded_private(vma)))
|
||||
goto unlock;
|
||||
|
||||
if (folio_use_access_time(folio))
|
||||
folio_xchg_access_time(folio,
|
||||
jiffies_to_msecs(jiffies));
|
||||
}
|
||||
/*
|
||||
* In case prot_numa, we are under mmap_read_lock(mm). It's critical
|
||||
|
||||
Reference in New Issue
Block a user