mm: rename walk_page_range_mm()
Patch series "mm: perform guard region install/remove under VMA lock", v2.

There is no reason why we can't perform guard region operations under the VMA lock, as long as we take proper precautions to ensure that we do so in a safe manner. This is fine, as VMA lock acquisition is always best-effort, so if we are unable to do so, we can simply fall back to using the mmap read lock.

Doing so will reduce mmap lock contention for callers performing guard region operations and help establish a precedent of trying to use the VMA lock where possible.

As part of this change we perform a trivial rename of the page walk functions which bypass safety checks (i.e. whether or not mm_walk_ops->install_pte is specified), so that naming remains consistent across the mm walk functions. This is because we need to expose a VMA-specific walk that still allows us to install PTE entries.

This patch (of 2): Make it clear that we're referencing an unsafe variant of this function explicitly. This lays the foundation for exposing more such functions while maintaining a consistent naming scheme.

As part of this change, rename check_ops_valid() to check_ops_safe() for consistency.

Link: https://lkml.kernel.org/r/cover.1762795245.git.lorenzo.stoakes@oracle.com
Link: https://lkml.kernel.org/r/c684d91464a438d6e31172c9450416a373f10649.1762795245.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: SeongJae Park <sj@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
committed by Andrew Morton
parent bc8e51c05a
commit f4af67ff4f
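For orientation before the diff: the "safety check" the commit message refers to is simply whether the walk operations request PTE installation via mm_walk_ops->install_pte. The following is a minimal sketch of that idea, paraphrased from the changes below rather than compilable standalone code; exported page walkers reject such operations, while the *_unsafe() variants skip the check so internal MM code (such as guard region installation) can install PTE entries.

/* Sketch only: what the renamed helper conceptually checks. */
static bool check_ops_safe(const struct mm_walk_ops *ops)
{
	/* PTE installation is reserved for internal memory management code. */
	if (ops->install_pte)
		return false;

	return true;
}

/* Exported walkers gate on the check; *_unsafe() callers bypass it. */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops, void *private)
{
	if (!check_ops_safe(ops))
		return -EINVAL;

	return walk_page_range_mm_unsafe(mm, start, end, ops, private);
}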
mm/internal.h
@@ -1649,7 +1649,7 @@ static inline void accept_page(struct page *page)
 #endif /* CONFIG_UNACCEPTED_MEMORY */
 
 /* pagewalk.c */
-int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
+int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
 		unsigned long end, const struct mm_walk_ops *ops,
 		void *private);
 int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
mm/madvise.c
@@ -1173,8 +1173,8 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
 	unsigned long nr_pages = 0;
 
 	/* Returns < 0 on error, == 0 if success, > 0 if zap needed. */
-	err = walk_page_range_mm(vma->vm_mm, range->start, range->end,
-				 &guard_install_walk_ops, &nr_pages);
+	err = walk_page_range_mm_unsafe(vma->vm_mm, range->start,
+			range->end, &guard_install_walk_ops, &nr_pages);
 	if (err < 0)
 		return err;
 
mm/pagewalk.c
@@ -452,7 +452,7 @@ static inline void process_vma_walk_lock(struct vm_area_struct *vma,
  * We usually restrict the ability to install PTEs, but this functionality is
  * available to internal memory management code and provided in mm/internal.h.
  */
-int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
+int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
 		unsigned long end, const struct mm_walk_ops *ops,
 		void *private)
 {
@@ -518,10 +518,10 @@ int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
  * This check is performed on all functions which are parameterised by walk
  * operations and exposed in include/linux/pagewalk.h.
  *
- * Internal memory management code can use the walk_page_range_mm() function to
- * be able to use all page walking operations.
+ * Internal memory management code can use *_unsafe() functions to be able to
+ * use all page walking operations.
  */
-static bool check_ops_valid(const struct mm_walk_ops *ops)
+static bool check_ops_safe(const struct mm_walk_ops *ops)
 {
 	/*
 	 * The installation of PTEs is solely under the control of memory
@@ -579,10 +579,10 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
 		unsigned long end, const struct mm_walk_ops *ops,
 		void *private)
 {
-	if (!check_ops_valid(ops))
+	if (!check_ops_safe(ops))
 		return -EINVAL;
 
-	return walk_page_range_mm(mm, start, end, ops, private);
+	return walk_page_range_mm_unsafe(mm, start, end, ops, private);
 }
 
 /**
@@ -639,7 +639,7 @@ int walk_kernel_page_table_range_lockless(unsigned long start, unsigned long end
 
 	if (start >= end)
 		return -EINVAL;
-	if (!check_ops_valid(ops))
+	if (!check_ops_safe(ops))
 		return -EINVAL;
 
 	return walk_pgd_range(start, end, &walk);
@@ -678,7 +678,7 @@ int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
 			pgd, private);
 	if (start >= end || !walk.mm)
 		return -EINVAL;
-	if (!check_ops_valid(ops))
+	if (!check_ops_safe(ops))
 		return -EINVAL;
 
 	/*
@@ -709,7 +709,7 @@ int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
 		return -EINVAL;
 	if (start < vma->vm_start || end > vma->vm_end)
 		return -EINVAL;
-	if (!check_ops_valid(ops))
+	if (!check_ops_safe(ops))
 		return -EINVAL;
 
 	process_mm_walk_lock(walk.mm, ops->walk_lock);
@@ -729,7 +729,7 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 
 	if (!walk.mm)
 		return -EINVAL;
-	if (!check_ops_valid(ops))
+	if (!check_ops_safe(ops))
 		return -EINVAL;
 
 	process_mm_walk_lock(walk.mm, ops->walk_lock);
@@ -780,7 +780,7 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
 	unsigned long start_addr, end_addr;
 	int err = 0;
 
-	if (!check_ops_valid(ops))
+	if (!check_ops_safe(ops))
 		return -EINVAL;
 
 	lockdep_assert_held(&mapping->i_mmap_rwsem);