mm: fix some typos in mm module
Below are some typos in the code comments:

  intevals      ==> intervals
  addesses      ==> addresses
  unavaliable   ==> unavailable
  facor         ==> factor
  droping       ==> dropping
  exlusive      ==> exclusive
  decription    ==> description
  confict       ==> conflict
  desriptions   ==> descriptions
  otherwize     ==> otherwise
  vlaue         ==> value
  cheching      ==> checking
  exisitng      ==> existing
  modifed       ==> modified
  differenciate ==> differentiate
  refernece     ==> reference
  permissons    ==> permissions
  indepdenent   ==> independent
  spliting      ==> splitting

Just fix it.

Link: https://lkml.kernel.org/r/20250929002608.1633825-1-jianyungao89@gmail.com
Signed-off-by: jianyun.gao <jianyungao89@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Acked-by: Chris Li <chrisl@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit b6c46600bf
parent 37d1792548
committed by Andrew Morton
@@ -1264,7 +1264,7 @@ enum damon_sysfs_cmd {
 	DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
 	/*
 	 * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
-	 * intevals.
+	 * intervals.
 	 */
 	DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
 	/*
mm/gup.c
@@ -2710,7 +2710,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  *
  *  *) ptes can be read atomically by the architecture.
  *
- *  *) valid user addesses are below TASK_MAX_SIZE
+ *  *) valid user addresses are below TASK_MAX_SIZE
  *
  * The last two assumptions can be relaxed by the addition of helper functions.
  *
@@ -2934,7 +2934,7 @@ typedef enum {
 	 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
 	 * that currently vma_needs_reservation() has an unwanted side
 	 * effect to either use end() or commit() to complete the
-	 * transaction. Hence it needs to differenciate from NEEDED.
+	 * transaction. Hence it needs to differentiate from NEEDED.
 	 */
 	MAP_CHG_ENFORCED = 2,
 } map_chg_state;
@@ -6007,7 +6007,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	/*
 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
 	 * could defer the flush until now, since by holding i_mmap_rwsem we
-	 * guaranteed that the last refernece would not be dropped. But we must
+	 * guaranteed that the last reference would not be dropped. But we must
 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
 	 * dropped and the last reference to the shared PMDs page might be
 	 * dropped as well.
@@ -7193,7 +7193,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 		} else if (unlikely(is_pte_marker(pte))) {
 			/*
 			 * Do nothing on a poison marker; page is
-			 * corrupted, permissons do not apply. Here
+			 * corrupted, permissions do not apply. Here
 			 * pte_marker_uffd_wp()==true implies !poison
 			 * because they're mutual exclusive.
 			 */
@@ -75,7 +75,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
 	if (likely(pmd_leaf(*pmd))) {
 		/*
 		 * Higher order allocations from buddy allocator must be able to
-		 * be treated as indepdenent small pages (as they can be freed
+		 * be treated as independent small pages (as they can be freed
 		 * individually).
 		 */
 		if (!PageReserved(head))
@@ -684,7 +684,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
 		ret = hugetlb_vmemmap_split_folio(h, folio);

 		/*
-		 * Spliting the PMD requires allocating a page, thus lets fail
+		 * Splitting the PMD requires allocating a page, thus let's fail
 		 * early once we encounter the first OOM. No point in retrying
 		 * as it can be dynamically done on remap with the memory
 		 * we get back from the vmemmap deduplication.
@@ -715,7 +715,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
 		/*
 		 * Pages to be freed may have been accumulated. If we
 		 * encounter an ENOMEM, free what we have and try again.
-		 * This can occur in the case that both spliting fails
+		 * This can occur in the case that both splitting fails
 		 * halfway and head page allocation also failed. In this
 		 * case __hugetlb_vmemmap_optimize_folio() would free memory
 		 * allowing more vmemmap remaps to occur.
@@ -33,7 +33,7 @@ bool kmsan_enabled __read_mostly;

 /*
  * Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
- * unavaliable.
+ * unavailable.
  */
 DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);

mm/ksm.c
@@ -389,7 +389,7 @@ static unsigned long ewma(unsigned long prev, unsigned long curr)
  * exponentially weighted moving average. The new pages_to_scan value is
  * multiplied with that change factor:
  *
- *	new_pages_to_scan *= change facor
+ *	new_pages_to_scan *= change factor
  *
  * The new_pages_to_scan value is limited by the cpu min and max values. It
  * calculates the cpu percent for the last scan and calculates the new
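For context, the KSM comment above describes an advisor that smooths the observed change with an exponentially weighted moving average, applies the resulting factor to pages_to_scan, and clamps the result to configured CPU-derived bounds. The following is a minimal standalone sketch of that pattern only; example_ewma, scale_pages_to_scan, EWMA_WEIGHT and the bounds are hypothetical names and values for illustration, not the mm/ksm.c implementation (only ewma() and pages_to_scan appear in the hunk).

/* Illustrative sketch of an EWMA-scaled, clamped pages_to_scan update. */
#include <stdio.h>

#define EWMA_WEIGHT	25	/* hypothetical: new sample contributes 25% */

static unsigned long example_ewma(unsigned long prev, unsigned long curr)
{
	/* Weighted blend of the previous average and the latest sample. */
	return (EWMA_WEIGHT * curr + (100 - EWMA_WEIGHT) * prev) / 100;
}

static unsigned long scale_pages_to_scan(unsigned long pages_to_scan,
					 unsigned long change_pct,
					 unsigned long min_pages,
					 unsigned long max_pages)
{
	/* new_pages_to_scan *= change factor (expressed in percent) ... */
	unsigned long new_pages = pages_to_scan * change_pct / 100;

	/* ... then limited by the configured min and max values. */
	if (new_pages < min_pages)
		new_pages = min_pages;
	if (new_pages > max_pages)
		new_pages = max_pages;
	return new_pages;
}

int main(void)
{
	/* Previous factor 100%, latest measurement suggests 140%. */
	unsigned long factor = example_ewma(100, 140);

	printf("%lu\n", scale_pages_to_scan(5000, factor, 1000, 100000));
	return 0;
}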
@@ -519,7 +519,7 @@ static inline void __init_node_memory_type(int node, struct memory_dev_type *mem
 		 * for each device getting added in the same NUMA node
 		 * with this specific memtype, bump the map count. We
 		 * Only take memtype device reference once, so that
-		 * changing a node memtype can be done by droping the
+		 * changing a node memtype can be done by dropping the
 		 * only reference count taken here.
 		 */
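The comment in this hunk describes a reference-counting convention: every device added to a node bumps a per-node map count, but the shared memtype object is referenced only once, on the first device, so the node can later switch memtypes by dropping that single reference. Below is a toy model of that pattern under invented names (example_memtype, example_node and the helpers are not mm/memory-tiers.c code).

/* Toy model: per-node map count, single reference on the shared memtype. */
#include <stdio.h>

struct example_memtype {
	int refcount;
};

struct example_node {
	struct example_memtype *memtype;
	int map_count;
};

static void example_node_add_device(struct example_node *node,
				    struct example_memtype *memtype)
{
	if (!node->map_count) {
		node->memtype = memtype;
		memtype->refcount++;	/* the only reference this node takes */
	}
	node->map_count++;		/* bumped for every device on the node */
}

static void example_node_remove_device(struct example_node *node)
{
	if (!--node->map_count) {
		node->memtype->refcount--;	/* drop the single reference */
		node->memtype = NULL;		/* node memtype can now change */
	}
}

int main(void)
{
	struct example_memtype dram = { 0 };
	struct example_node node = { 0 };

	example_node_add_device(&node, &dram);
	example_node_add_device(&node, &dram);
	printf("map_count=%d refcount=%d\n", node.map_count, dram.refcount);
	example_node_remove_device(&node);
	example_node_remove_device(&node);
	printf("map_count=%d refcount=%d\n", node.map_count, dram.refcount);
	return 0;
}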
@@ -4328,7 +4328,7 @@ static inline bool should_try_to_free_swap(struct folio *folio,
 	 * If we want to map a page that's in the swapcache writable, we
 	 * have to detect via the refcount if we're really the exclusive
	 * user. Try freeing the swapcache to get rid of the swapcache
-	 * reference only in case it's likely that we'll be the exlusive user.
+	 * reference only in case it's likely that we'll be the exclusive user.
 	 */
 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
 		folio_ref_count(folio) == (1 + folio_nr_pages(folio));
@@ -5405,7 +5405,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa

 /**
  * set_pte_range - Set a range of PTEs to point to pages in a folio.
- * @vmf: Fault decription.
+ * @vmf: Fault description.
  * @folio: The folio that contains @page.
  * @page: The first page to create a PTE for.
  * @nr: The number of PTEs to create.
@@ -227,7 +227,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
 	struct file *file;
 	int fd, err;

-	/* make sure local flags do not confict with global fcntl.h */
+	/* make sure local flags do not conflict with global fcntl.h */
 	BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);

 	if (!secretmem_enable || !can_set_direct_map())
@@ -259,7 +259,7 @@ out:
  * @object_size: The size of objects to be created in this cache.
  * @args: Additional arguments for the cache creation (see
  *        &struct kmem_cache_args).
- * @flags: See the desriptions of individual flags. The common ones are listed
+ * @flags: See the descriptions of individual flags. The common ones are listed
  *         in the description below.
  *
  * Not to be called directly, use the kmem_cache_create() wrapper with the same
@@ -2533,7 +2533,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
 			memset((char *)kasan_reset_tag(x) + inuse, 0,
 			       s->size - inuse - rsize);
 		/*
-		 * Restore orig_size, otherwize kmalloc redzone overwritten
+		 * Restore orig_size, otherwise kmalloc redzone overwritten
 		 * would be reported
 		 */
 		set_orig_size(s, x, orig_size);
@@ -1677,7 +1677,7 @@ static bool swap_entries_put_map_nr(struct swap_info_struct *si,

 /*
  * Check if it's the last ref of swap entry in the freeing path.
- * Qualified vlaue includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
+ * Qualified value includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
 */
 static inline bool __maybe_unused swap_is_last_ref(unsigned char count)
 {
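The comment fixed above names the swap-count values that qualify as a "last reference": a plain count of 1, a cache-only marker, or the shmem marker. A standalone sketch of such a check follows; example_swap_is_last_ref and the EX_* constants are local stand-ins chosen for the example, not the mm/swapfile.c code or the definitions from linux/swap.h.

/* Sketch of a last-reference check over an unsigned char swap count. */
#include <stdbool.h>
#include <stdio.h>

#define EX_SWAP_HAS_CACHE	0x40	/* stand-in: only the swap cache holds it */
#define EX_SWAP_MAP_SHMEM	0xbf	/* stand-in: shmem/tmpfs-owned entry */

static bool example_swap_is_last_ref(unsigned char count)
{
	/* One map reference, a cache-only reference, or a shmem marker. */
	return count == 1 || count == EX_SWAP_HAS_CACHE ||
	       count == EX_SWAP_MAP_SHMEM;
}

int main(void)
{
	printf("%d %d %d\n",
	       example_swap_is_last_ref(1),
	       example_swap_is_last_ref(EX_SWAP_HAS_CACHE),
	       example_swap_is_last_ref(2));	/* still shared: not last */
	return 0;
}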
@@ -1578,7 +1578,7 @@ static int validate_move_areas(struct userfaultfd_ctx *ctx,

 	/*
 	 * For now, we keep it simple and only move between writable VMAs.
-	 * Access flags are equal, therefore cheching only the source is enough.
+	 * Access flags are equal, therefore checking only the source is enough.
 	 */
 	if (!(src_vma->vm_flags & VM_WRITE))
 		return -EINVAL;
mm/vma.c
@@ -109,7 +109,7 @@ static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_nex
 static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
 {
 	struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev;
-	struct vm_area_struct *src = vmg->middle; /* exisitng merge case. */
+	struct vm_area_struct *src = vmg->middle; /* existing merge case. */
 	struct anon_vma *tgt_anon = tgt->anon_vma;
 	struct anon_vma *src_anon = vmg->anon_vma;

@@ -798,7 +798,7 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
  * Returns: The merged VMA if merge succeeds, or NULL otherwise.
  *
  * ASSUMPTIONS:
- * - The caller must assign the VMA to be modifed to @vmg->middle.
+ * - The caller must assign the VMA to be modified to @vmg->middle.
  * - The caller must have set @vmg->prev to the previous VMA, if there is one.
  * - The caller must not set @vmg->next, as we determine this.
  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.