Mirror of https://github.com/torvalds/linux.git (synced 2025-12-07 20:06:24 +00:00)
mm: avoid unnecessary use of is_swap_pmd()
PMD 'non-swap' swap entries are currently used for PMD-level migration
entries and device private entries.  To add to the confusion in this
terminology, we use is_swap_pmd() in an inconsistent way similar to how
is_swap_pte() was being used - sometimes adopting the convention that
!pmd_none() && !pmd_present() implies a PMD 'swap' entry, sometimes not.

This patch handles the low-hanging fruit of cases where we can simply
substitute other predicates for is_swap_pmd().

No functional change intended.

Link: https://lkml.kernel.org/r/8a1704b36a009c18032d5bea4cb68e71448fbbe5.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mathew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
committed by: Andrew Morton
parent: de4d6c9491
commit: aa62204cb6
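For context on the substitution described in the commit message above: is_swap_pmd() is just a composition of the pmd_none()/pmd_present() predicates, so a caller that has already filtered out pmd_none() can test !pmd_present() (or simply fall into an else branch) instead. Below is a minimal userspace sketch of that relationship, using a stand-in pmd type since the real pmd_t and its helpers are arch-specific; it is illustrative only, not the kernel's definitions.

#include <stdbool.h>
#include <assert.h>

/* Stand-in for pmd_t: just enough state to express the two predicates. */
struct pmd_stub {
	bool none;	/* pmd_none(): the entry is empty */
	bool present;	/* pmd_present(): the entry maps real memory */
};

static bool pmd_none(struct pmd_stub pmd)    { return pmd.none; }
static bool pmd_present(struct pmd_stub pmd) { return pmd.present; }

/* The convention the commit message refers to: populated but not present. */
static bool is_swap_pmd(struct pmd_stub pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

int main(void)
{
	struct pmd_stub none = { .none = true };
	struct pmd_stub present = { .present = true };
	struct pmd_stub swap = { 0 };	/* neither none nor present */

	/*
	 * Once pmd_none() has been excluded, is_swap_pmd() collapses to
	 * !pmd_present() - which is why the patch can drop it in callers
	 * that already handle the none and present cases explicitly.
	 */
	assert(!is_swap_pmd(none) && !is_swap_pmd(present) && is_swap_pmd(swap));
	assert(is_swap_pmd(swap) == (!pmd_none(swap) && !pmd_present(swap)));
	return 0;
}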
fs/proc/task_mmu.c

@@ -1059,10 +1059,12 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	bool present = false;
 	struct folio *folio;
 
+	if (pmd_none(*pmd))
+		return;
 	if (pmd_present(*pmd)) {
 		page = vm_normal_page_pmd(vma, addr, *pmd);
 		present = true;
-	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
+	} else if (unlikely(thp_migration_supported())) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
 		if (is_pfn_swap_entry(entry))
@@ -2000,6 +2002,9 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
 	if (vma->vm_flags & VM_SOFTDIRTY)
 		flags |= PM_SOFT_DIRTY;
 
+	if (pmd_none(pmd))
+		goto populate_pagemap;
+
 	if (pmd_present(pmd)) {
 		page = pmd_page(pmd);
 
@@ -2010,7 +2015,7 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
 			flags |= PM_UFFD_WP;
 		if (pm->show_pfn)
 			frame = pmd_pfn(pmd) + idx;
-	} else if (thp_migration_supported() && is_swap_pmd(pmd)) {
+	} else if (thp_migration_supported()) {
 		swp_entry_t entry = pmd_to_swp_entry(pmd);
 		unsigned long offset;
 
@@ -2037,6 +2042,7 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
 			flags |= PM_FILE;
 	}
 
+populate_pagemap:
 	for (; addr != end; addr += PAGE_SIZE, idx++) {
 		u64 cur_flags = flags;
 		pagemap_entry_t pme;
@@ -2399,6 +2405,9 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
 {
 	unsigned long categories = PAGE_IS_HUGE;
 
+	if (pmd_none(pmd))
+		return categories;
+
 	if (pmd_present(pmd)) {
 		struct page *page;
 
@@ -2416,7 +2425,7 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
 			categories |= PAGE_IS_PFNZERO;
 		if (pmd_soft_dirty(pmd))
 			categories |= PAGE_IS_SOFT_DIRTY;
-	} else if (is_swap_pmd(pmd)) {
+	} else {
 		swp_entry_t swp;
 
 		categories |= PAGE_IS_SWAPPED;
include/linux/swapops.h

@@ -509,7 +509,13 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
 
 static inline int is_pmd_migration_entry(pmd_t pmd)
 {
-	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
+	swp_entry_t entry;
+
+	if (pmd_present(pmd))
+		return 0;
+
+	entry = pmd_to_swp_entry(pmd);
+	return is_migration_entry(entry);
 }
 #else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
 static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
@@ -557,7 +563,13 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
  */
 static inline int is_pmd_device_private_entry(pmd_t pmd)
 {
-	return is_swap_pmd(pmd) && is_device_private_entry(pmd_to_swp_entry(pmd));
+	swp_entry_t entry;
+
+	if (pmd_present(pmd))
+		return 0;
+
+	entry = pmd_to_swp_entry(pmd);
+	return is_device_private_entry(entry);
 }
 
 #else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
mm/huge_memory.c

@@ -2354,9 +2354,11 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
 
 static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
 {
+	if (pmd_none(pmd))
+		return pmd;
 	if (pmd_present(pmd))
 		pmd = pmd_clear_uffd_wp(pmd);
-	else if (is_swap_pmd(pmd))
+	else
 		pmd = pmd_swp_clear_uffd_wp(pmd);
 
 	return pmd;
mm/memory.c
@@ -1376,6 +1376,7 @@ copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		next = pmd_addr_end(addr, end);
 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)) {
 			int err;
+
 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
 					    addr, dst_vma, src_vma);
@@ -6340,35 +6341,40 @@ retry_pud:
 	if (pmd_none(*vmf.pmd) &&
 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
 		ret = create_huge_pmd(&vmf);
-		if (!(ret & VM_FAULT_FALLBACK))
+		if (ret & VM_FAULT_FALLBACK)
+			goto fallback;
+		else
 			return ret;
-	} else {
-		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
+	}
 
-		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
-			if (is_pmd_device_private_entry(vmf.orig_pmd))
-				return do_huge_pmd_device_private(&vmf);
+	vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
+	if (pmd_none(vmf.orig_pmd))
+		goto fallback;
 
-			if (is_pmd_migration_entry(vmf.orig_pmd))
-				pmd_migration_entry_wait(mm, vmf.pmd);
-			return 0;
-		}
-		if (pmd_trans_huge(vmf.orig_pmd)) {
-			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
-				return do_huge_pmd_numa_page(&vmf);
+	if (unlikely(!pmd_present(vmf.orig_pmd))) {
+		if (is_pmd_device_private_entry(vmf.orig_pmd))
+			return do_huge_pmd_device_private(&vmf);
 
-			if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
-			    !pmd_write(vmf.orig_pmd)) {
-				ret = wp_huge_pmd(&vmf);
-				if (!(ret & VM_FAULT_FALLBACK))
-					return ret;
-			} else {
-				huge_pmd_set_accessed(&vmf);
-				return 0;
-			}
+		if (is_pmd_migration_entry(vmf.orig_pmd))
+			pmd_migration_entry_wait(mm, vmf.pmd);
+		return 0;
+	}
+	if (pmd_trans_huge(vmf.orig_pmd)) {
+		if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
+			return do_huge_pmd_numa_page(&vmf);
+
+		if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
+		    !pmd_write(vmf.orig_pmd)) {
+			ret = wp_huge_pmd(&vmf);
+			if (!(ret & VM_FAULT_FALLBACK))
+				return ret;
+		} else {
+			huge_pmd_set_accessed(&vmf);
+			return 0;
 		}
 	}
 
+fallback:
 	return handle_pte_fault(&vmf);
 }
 
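Since the __handle_mm_fault() hunk above both re-indents and restructures the PMD portion of the fault path, the resulting flow is easier to follow with the added lines gathered in one place. The excerpt below is reconstructed purely from the '+' side of that hunk (the surrounding declarations and the rest of the function are omitted, and the comments are mine), so treat it as a reading aid rather than authoritative kernel source.

	/* Try to service the fault with a huge PMD; otherwise fall back to PTEs. */
	if (pmd_none(*vmf.pmd) &&
	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
		ret = create_huge_pmd(&vmf);
		if (ret & VM_FAULT_FALLBACK)
			goto fallback;
		else
			return ret;
	}

	vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
	if (pmd_none(vmf.orig_pmd))
		goto fallback;

	/* Neither none nor present: a device private or migration entry. */
	if (unlikely(!pmd_present(vmf.orig_pmd))) {
		if (is_pmd_device_private_entry(vmf.orig_pmd))
			return do_huge_pmd_device_private(&vmf);

		if (is_pmd_migration_entry(vmf.orig_pmd))
			pmd_migration_entry_wait(mm, vmf.pmd);
		return 0;
	}

	/* A present huge PMD: NUMA hinting, write protection, or accessed bit. */
	if (pmd_trans_huge(vmf.orig_pmd)) {
		if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
			return do_huge_pmd_numa_page(&vmf);

		if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
		    !pmd_write(vmf.orig_pmd)) {
			ret = wp_huge_pmd(&vmf);
			if (!(ret & VM_FAULT_FALLBACK))
				return ret;
		} else {
			huge_pmd_set_accessed(&vmf);
			return 0;
		}
	}

fallback:
	return handle_pte_fault(&vmf);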
mm/page_table_check.c

@@ -215,10 +215,14 @@ EXPORT_SYMBOL(__page_table_check_ptes_set);
 
 static inline void page_table_check_pmd_flags(pmd_t pmd)
 {
-	if (pmd_present(pmd) && pmd_uffd_wp(pmd))
-		WARN_ON_ONCE(pmd_write(pmd));
-	else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
-		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
+	if (pmd_present(pmd)) {
+		if (pmd_uffd_wp(pmd))
+			WARN_ON_ONCE(pmd_write(pmd));
+	} else if (pmd_swp_uffd_wp(pmd)) {
+		swp_entry_t entry = pmd_to_swp_entry(pmd);
+
+		WARN_ON_ONCE(swap_cached_writable(entry));
+	}
 }
 
 void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,