mm/huge_memory: move more common code into insert_pud()
Let's clean it all further up.

No functional change intended.

Link: https://lkml.kernel.org/r/20250811112631.759341-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 77e493280e
parent b7298e418e
committed by Andrew Morton
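The shape of the cleanup, independent of the kernel specifics: both callers duplicated a range check, lock/unlock, and a status return around the shared helper; the patch hoists all of that into the helper, which now reports the fault status itself. Below is a minimal userspace C analogue of that pattern, not kernel code; every name in it (region, insert, FAULT_*) is made up for illustration.

/*
 * Userspace sketch of the refactoring pattern in this commit
 * (illustrative only): duplicated validation and lock/unlock code is
 * hoisted out of the two callers into the shared helper, which now
 * returns the status itself.
 */
#include <pthread.h>
#include <stdio.h>

enum fault { FAULT_NOPAGE, FAULT_SIGBUS };

struct region {
	unsigned long start, end;
	pthread_mutex_t lock;
	unsigned long mapped;	/* stand-in for the page-table entry */
};

/* Common checks and locking live here now, not in every caller. */
static enum fault insert(struct region *r, unsigned long addr,
			 unsigned long val)
{
	if (addr < r->start || addr >= r->end)
		return FAULT_SIGBUS;

	pthread_mutex_lock(&r->lock);
	r->mapped = val;
	pthread_mutex_unlock(&r->lock);
	return FAULT_NOPAGE;
}

/* The callers shrink to argument setup plus a tail call. */
static enum fault insert_pfn(struct region *r, unsigned long addr)
{
	return insert(r, addr, 0x1000);
}

static enum fault insert_folio(struct region *r, unsigned long addr)
{
	return insert(r, addr, 0x2000);
}

int main(void)
{
	struct region r = { 0x1000, 0x9000, PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("in range:     %d\n", insert_pfn(&r, 0x2000));	/* FAULT_NOPAGE */
	printf("out of range: %d\n", insert_folio(&r, 0xf000));	/* FAULT_SIGBUS */
	return 0;
}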
@@ -1507,25 +1507,30 @@ static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
 	return pud;
 }
 
-static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_pud(struct vm_area_struct *vma, unsigned long addr,
 		pud_t *pud, struct folio_or_pfn fop, pgprot_t prot, bool write)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	pud_t entry;
 
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
+	ptl = pud_lock(mm, pud);
 	if (!pud_none(*pud)) {
 		const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
 					  fop.pfn;
 
 		if (write) {
 			if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
-				return;
+				goto out_unlock;
 			entry = pud_mkyoung(*pud);
 			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
 				update_mmu_cache_pud(vma, addr, pud);
 		}
-		return;
+		goto out_unlock;
 	}
 
 	if (fop.is_folio) {
@@ -1544,6 +1549,9 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
 	}
 	set_pud_at(mm, addr, pud, entry);
 	update_mmu_cache_pud(vma, addr, pud);
+out_unlock:
+	spin_unlock(ptl);
+	return VM_FAULT_NOPAGE;
 }
 
 /**
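Worth noting in the two hunks above: because insert_pud() now takes the lock itself, the early returns inside it had to become "goto out_unlock" so the lock is dropped on every path. A self-contained toy of that single-unlock idiom follows; it is illustrative only, not kernel code, and the names (update, entry) are invented.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int entry;	/* stand-in for the page-table slot */

/* Every exit taken after the lock is held funnels through out_unlock. */
static int update(int val, bool write)
{
	int ret = -1;

	pthread_mutex_lock(&lock);
	if (entry != 0) {
		/* Slot already populated: only accept a matching write. */
		if (!write || entry != val)
			goto out_unlock;	/* early exit, still unlocks */
		ret = 0;
		goto out_unlock;
	}
	entry = val;	/* empty slot: populate it */
	ret = 0;
out_unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", update(7, true),	/* 0: populated  */
			     update(7, true),	/* 0: matched    */
			     update(9, true));	/* -1: mismatch  */
	return 0;
}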
@@ -1565,7 +1573,6 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
 	struct folio_or_pfn fop = {
 		.pfn = pfn,
 	};
-	spinlock_t *ptl;
 
 	/*
 	 * If we had pud_special, we could avoid all these restrictions,
@@ -1577,16 +1584,9 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
 			(VM_PFNMAP|VM_MIXEDMAP));
 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
 
-	if (addr < vma->vm_start || addr >= vma->vm_end)
-		return VM_FAULT_SIGBUS;
-
 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-	ptl = pud_lock(vma->vm_mm, vmf->pud);
-	insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
-	spin_unlock(ptl);
-
-	return VM_FAULT_NOPAGE;
+	return insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 
@@ -1603,25 +1603,15 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
 {
 	struct vm_area_struct *vma = vmf->vma;
 	unsigned long addr = vmf->address & PUD_MASK;
-	pud_t *pud = vmf->pud;
-	struct mm_struct *mm = vma->vm_mm;
 	struct folio_or_pfn fop = {
 		.folio = folio,
 		.is_folio = true,
 	};
-	spinlock_t *ptl;
-
-	if (addr < vma->vm_start || addr >= vma->vm_end)
-		return VM_FAULT_SIGBUS;
 
 	if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
 		return VM_FAULT_SIGBUS;
 
-	ptl = pud_lock(mm, pud);
-	insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
-	spin_unlock(ptl);
-
-	return VM_FAULT_NOPAGE;
+	return insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
 }
 EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */