author | Lu Jialin <lujialin4@huawei.com> | 2021-05-06 18:06:50 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-05-07 00:26:35 -0700
commit | baf2f90ba416cd887d7f54cc877d8764f6775de2 (patch)
tree | 5ad2c297572357f88a1140b822f2d1180d827460
parent | f0953a1bbaca71e1ebbcb9864eb1b273156157ed (diff)
mm: fix typos in comments
succed -> succeed in mm/hugetlb.c
wil -> will in mm/mempolicy.c
wit -> with in mm/page_alloc.c
Retruns -> Returns in mm/page_vma_mapped.c
confict -> conflict in mm/secretmem.c
No functionality changed.
Link: https://lkml.kernel.org/r/20210408140027.60623-1-lujialin4@huawei.com
Signed-off-by: Lu Jialin <lujialin4@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/mempolicy.c | 2
-rw-r--r-- | mm/page_alloc.c | 2
-rw-r--r-- | mm/page_vma_mapped.c | 2
3 files changed, 3 insertions, 3 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5690513c5668..d79fa299b70c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -994,7 +994,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 	if (flags & MPOL_F_ADDR) {
 		/*
 		 * Take a refcount on the mpol, lookup_node()
-		 * wil drop the mmap_lock, so after calling
+		 * will drop the mmap_lock, so after calling
 		 * lookup_node() only "pol" remains valid, "vma"
 		 * is stale.
 		 */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0582c85da08c..aaa1655cf682 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4173,7 +4173,7 @@ out:
 }
 
 /*
- * Maximum number of compaction retries wit a progress before OOM
+ * Maximum number of compaction retries with a progress before OOM
  * killer is consider as the only way to move forward.
  */
 #define MAX_COMPACT_RETRIES 16
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 86e3a3688d59..2cf01d933f13 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -134,7 +134,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
  * regardless of which page table level the page is mapped at. @pvmw->pmd is
  * NULL.
  *
- * Retruns false if there are no more page table entries for the page in
+ * Returns false if there are no more page table entries for the page in
  * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
  *
  * If you need to stop the walk before page_vma_mapped_walk() returned false,
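The corrected comment in mm/page_vma_mapped.c documents the return convention of page_vma_mapped_walk(): it yields each page table entry mapping the page and returns false when no entries remain, and an early exit must go through page_vma_mapped_walk_done(). The sketch below is not part of this patch; it is a minimal illustration of the caller pattern that convention implies, assuming the struct fields as they existed around the v5.13 kernel, and the function name examine_page_mappings() plus its page/vma/addr parameters are hypothetical placeholders.

```c
#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Sketch only: visit every page table entry that maps @page inside @vma.
 * page_vma_mapped_walk() returns false once there are no more entries
 * (the behaviour the corrected comment describes). To stop early, call
 * page_vma_mapped_walk_done() so pvmw.ptl is unlocked and pvmw.pte is
 * unmapped before leaving the loop.
 */
static bool examine_page_mappings(struct page *page, struct vm_area_struct *vma,
				  unsigned long addr)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		/* pvmw.pte (or pvmw.pmd for a huge mapping) is valid here. */
		if (pvmw.pte && pte_dirty(*pvmw.pte)) {
			/* Early exit: release ptl/pte before breaking out. */
			page_vma_mapped_walk_done(&pvmw);
			return true;
		}
	}
	return false;
}
```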