mm/damon/vaddr: use vm_normal_folio{,_pmd}() instead of damon_get_folio()

A few page table walk entry callback functions in vaddr.c use
damon_get_folio() with p{te,md}_pfn() to get the folio, and then
folio_put().  Simplify and drop the unnecessary folio get/put by using
vm_normal_folio() and its friends instead.

Note that this cleanup was suggested by David Hildenbrand during a review
of another patch series [1], and that patch was updated following the
suggestion.  This patch further applies the same cleanup to DAMON code
that was merged before that series.

Link: https://lkml.kernel.org/r/20251112154114.66053-5-sj@kernel.org
Link: https://lore.kernel.org/0cb3d5a5-683b-4dba-90a8-b45ab83eec53@redhat.com [1]
Signed-off-by: SeongJae Park <sj@kernel.org>
Suggested-by: David Hildenbrand <david@kernel.org>
Cc: Bill Wendling <morbo@google.com>
Cc: Brendan Higgins <brendan.higgins@linux.dev>
Cc: David Gow <davidgow@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Justin Stitt <justinstitt@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
SeongJae Park
2025-11-12 07:41:07 -08:00
committed by Andrew Morton
parent 96549d56b8
commit f0eb046cd3

View File

@@ -444,7 +444,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
if (!pmd_present(pmde))
goto huge_out;
folio = damon_get_folio(pmd_pfn(pmde));
folio = vm_normal_folio_pmd(walk->vma, addr, pmde);
if (!folio)
goto huge_out;
if (pmd_young(pmde) || !folio_test_idle(folio) ||
@@ -452,7 +452,6 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
addr))
priv->young = true;
*priv->folio_sz = HPAGE_PMD_SIZE;
folio_put(folio);
huge_out:
spin_unlock(ptl);
return 0;
@@ -465,14 +464,13 @@ huge_out:
ptent = ptep_get(pte);
if (!pte_present(ptent))
goto out;
folio = damon_get_folio(pte_pfn(ptent));
folio = vm_normal_folio(walk->vma, addr, ptent);
if (!folio)
goto out;
if (pte_young(ptent) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr))
priv->young = true;
*priv->folio_sz = folio_size(folio);
folio_put(folio);
out:
pte_unmap_unlock(pte, ptl);
return 0;
@@ -720,18 +718,16 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
/* Tell page walk code to not split the PMD */
walk->action = ACTION_CONTINUE;
folio = damon_get_folio(pmd_pfn(pmde));
folio = vm_normal_folio_pmd(walk->vma, addr, pmde);
if (!folio)
goto unlock;
if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
goto put_folio;
goto unlock;
damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
migration_lists);
put_folio:
folio_put(folio);
unlock:
spin_unlock(ptl);
return 0;
@@ -754,18 +750,15 @@ static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
if (pte_none(ptent) || !pte_present(ptent))
return 0;
folio = damon_get_folio(pte_pfn(ptent));
folio = vm_normal_folio(walk->vma, addr, ptent);
if (!folio)
return 0;
if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
goto put_folio;
return 0;
damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
migration_lists);
put_folio:
folio_put(folio);
return 0;
}