For now, including <asm/pgalloc.h> instead of <linux/pgalloc.h> is
technically fine unless the .c file calls p*d_populate_kernel() helper
functions. But it is a better practice to always include <linux/pgalloc.h>.

Include <linux/pgalloc.h> instead of <asm/pgalloc.h> outside arch/.

Link: https://lkml.kernel.org/r/20251024113047.119058-3-harry.yoo@oracle.com
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
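As an illustrative sketch (the file name below is hypothetical, not part of
this change), a .c file outside arch/ that needs the page table allocation
helpers would simply do:

	/* drivers/foo/bar.c -- hypothetical example outside arch/ */
	#include <linux/pgalloc.h>	/* preferred; carries the p*d_populate_kernel() helpers on top of <asm/pgalloc.h> */

and would not include <asm/pgalloc.h> directly.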
73 lines
1.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include <linux/hugetlb.h>
#include <linux/pgalloc.h>

#include <asm-generic/tlb.h>

#include "internal.h"

bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
			   struct zap_details *details)
{
	/* Only attempt PT reclaim when the caller opted in and the range
	 * being zapped spans at least one full PMD entry. */
	return details && details->reclaim_pt && (end - start >= PMD_SIZE);
}

bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval)
{
	spinlock_t *pml = pmd_lockptr(mm, pmd);

	if (!spin_trylock(pml))
		return false;

	/* Snapshot the PMD entry, then clear it under the PMD lock. */
	*pmdval = pmdp_get_lockless(pmd);
	pmd_clear(pmd);
	spin_unlock(pml);

	return true;
}

void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
	      pmd_t pmdval)
{
	/* Hand the PTE page table over to the mmu_gather batch for freeing
	 * and update the mm's page table accounting. */
	pte_free_tlb(tlb, pmd_pgtable(pmdval), addr);
	mm_dec_nr_ptes(mm);
}

void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
		     struct mmu_gather *tlb)
{
	pmd_t pmdval;
	spinlock_t *pml, *ptl = NULL;
	pte_t *start_pte, *pte;
	int i;

	pml = pmd_lock(mm, pmd);
	start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
	if (!start_pte)
		goto out_ptl;
	if (ptl != pml)
		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

	/* Check if it is empty PTE page */
	for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(ptep_get(pte)))
			goto out_ptl;
	}
	pte_unmap(start_pte);

	/* The PTE page is empty: detach it from the PMD and free it. */
	pmd_clear(pmd);

	if (ptl != pml)
		spin_unlock(ptl);
	spin_unlock(pml);

	free_pte(mm, addr, tlb, pmdval);

	return;
out_ptl:
	if (start_pte)
		pte_unmap_unlock(start_pte, ptl);
	if (ptl != pml)
		spin_unlock(pml);
}