 .mailmap                                    |  3 +++
 MAINTAINERS                                 |  4 ++--
 include/linux/cgroup.h                      |  1 +
 include/linux/swapops.h                     |  8 +++++---
 kernel/cgroup/cgroup-internal.h             |  1 -
 mm/gup.c                                    |  2 +-
 mm/memcontrol.c                             | 15 +++++++++++++--
 mm/mmap.c                                   | 14 ++++----------
 mm/shmem.c                                  | 11 +++++++++++
 tools/testing/selftests/cgroup/test_kmem.c  |  6 +++---
 10 files changed, 43 insertions(+), 22 deletions(-)
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -287,6 +287,7 @@ Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
 Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
 Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
 Matthieu CASTET <castet.matthieu@free.fr>
+Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
@@ -372,6 +373,8 @@ Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
+Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
diff --git a/MAINTAINERS b/MAINTAINERS
index 7b6b8034811c..6044301ee9bd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5299,7 +5299,7 @@ M:      Johannes Weiner <hannes@cmpxchg.org>
 M:      Michal Hocko <mhocko@kernel.org>
 M:      Roman Gushchin <roman.gushchin@linux.dev>
 M:      Shakeel Butt <shakeelb@google.com>
-R:      Muchun Song <songmuchun@bytedance.com>
+R:      Muchun Song <muchun.song@linux.dev>
 L:      cgroups@vger.kernel.org
 L:      linux-mm@kvack.org
 S:      Maintained
@@ -9443,7 +9443,7 @@ F:      drivers/net/ethernet/huawei/hinic/
 
 HUGETLB SUBSYSTEM
 M:      Mike Kravetz <mike.kravetz@oracle.com>
-M:      Muchun Song <songmuchun@bytedance.com>
+M:      Muchun Song <muchun.song@linux.dev>
 L:      linux-mm@kvack.org
 S:      Maintained
 F:      Documentation/ABI/testing/sysfs-kernel-mm-hugepages
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 528bd44b59e2..2b7d077de7ef 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -68,6 +68,7 @@ struct css_task_iter {
         struct list_head        iters_node;     /* css_set->task_iters */
 };
 
+extern struct file_system_type cgroup_fs_type;
 extern struct cgroup_root cgrp_dfl_root;
 extern struct css_set init_css_set;
 
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 27ade4f22abb..b982dd614572 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -33,11 +33,13 @@
  * can use the extra bits to store other information besides PFN.
  */
 #ifdef MAX_PHYSMEM_BITS
-#define SWP_PFN_BITS                    (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define SWP_PFN_BITS            (MAX_PHYSMEM_BITS - PAGE_SHIFT)
 #else  /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_BITS                    (BITS_PER_LONG - PAGE_SHIFT)
+#define SWP_PFN_BITS            min_t(int, \
+                                      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+                                      SWP_TYPE_SHIFT)
 #endif  /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_MASK                    (BIT(SWP_PFN_BITS) - 1)
+#define SWP_PFN_MASK            (BIT(SWP_PFN_BITS) - 1)
 
 /**
  * Migration swap entry specific bitfield definitions.  Layout:
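For reference, the new SWP_PFN_BITS definition above matters on 32-bit kernels where MAX_PHYSMEM_BITS is not defined but phys_addr_t is 64 bits wide: the old formula (BITS_PER_LONG - PAGE_SHIFT) cannot represent PFNs above 4 GiB, while the new one sizes the field from the physical address width and clamps it at SWP_TYPE_SHIFT so the PFN field cannot spill into the swap-type bits of the entry. The standalone userspace sketch below (not kernel code) just replays that arithmetic; the PAGE_SHIFT and SWP_TYPE_SHIFT values are illustrative assumptions for a 32-bit configuration, not taken from any particular kernel config.

/*
 * Userspace sketch of the SWP_PFN_BITS arithmetic, assuming a 32-bit
 * build with a 64-bit phys_addr_t and 4 KiB pages. The constants are
 * illustrative assumptions, not real kernel values.
 */
#include <stdio.h>

#define BITS_PER_LONG   32
#define PAGE_SHIFT      12      /* 4 KiB pages */
#define SWP_TYPE_SHIFT  26      /* assumed: (BITS_PER_LONG - 1) - MAX_SWAPFILES_SHIFT */

int main(void)
{
        int phys_bits = sizeof(unsigned long long) * 8;  /* stand-in for phys_addr_t width */
        int old_bits  = BITS_PER_LONG - PAGE_SHIFT;      /* 20: PFNs limited to 4 GiB */
        int new_bits  = phys_bits - PAGE_SHIFT;          /* 52 before clamping */

        if (new_bits > SWP_TYPE_SHIFT)                   /* clamp so the PFN field never */
                new_bits = SWP_TYPE_SHIFT;               /* overlaps the swap-type bits */

        printf("old SWP_PFN_BITS=%d new SWP_PFN_BITS=%d\n", old_bits, new_bits);
        return 0;
}

With these assumed values the sketch prints 20 for the old definition and 26 for the new one, i.e. the swap PFN field grows enough to cover physical memory above 4 GiB while still fitting below the type bits.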
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index fd4020835ec6..367b0a42ada9 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -167,7 +167,6 @@ struct cgroup_mgctx {
 extern spinlock_t css_set_lock;
 extern struct cgroup_subsys *cgroup_subsys[];
 extern struct list_head cgroup_roots;
-extern struct file_system_type cgroup_fs_type;
 
 /* iterate across the hierarchies */
 #define for_each_root(root)                                             \
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2767,7 +2767,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
                 next = pud_addr_end(addr, end);
                 if (unlikely(!pud_present(pud)))
                         return 0;
-                if (unlikely(pud_huge(pud))) {
+                if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
                         if (!gup_huge_pud(pud, pudp, addr, next, flags,
                                           pages, nr))
                                 return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 23750cec0036..2c7a91689fef 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4836,6 +4836,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
         unsigned int efd, cfd;
         struct fd efile;
         struct fd cfile;
+        struct dentry *cdentry;
         const char *name;
         char *endp;
         int ret;
@@ -4890,6 +4891,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
                 goto out_put_cfile;
 
         /*
+         * The control file must be a regular cgroup1 file. As a regular cgroup
+         * file can't be renamed, it's safe to access its name afterwards.
+         */
+        cdentry = cfile.file->f_path.dentry;
+        if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
+                ret = -EINVAL;
+                goto out_put_cfile;
+        }
+
+        /*
          * Determine the event callbacks and set them in @event. This used
          * to be done via struct cftype but cgroup core no longer knows
          * about these events. The following is crude but the whole thing
@@ -4897,7 +4908,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
          *
          * DO NOT ADD NEW FILES.
          */
-        name = cfile.file->f_path.dentry->d_name.name;
+        name = cdentry->d_name.name;
 
         if (!strcmp(name, "memory.usage_in_bytes")) {
                 event->register_event = mem_cgroup_usage_register_event;
@@ -4921,7 +4932,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
          * automatically removed on cgroup destruction but the removal is
          * asynchronous, so take an extra ref on @css.
          */
-        cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
+        cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
                                                &memory_cgrp_subsys);
         ret = -EINVAL;
         if (IS_ERR(cfile_css))
diff --git a/mm/mmap.c b/mm/mmap.c
index 4624ff3ded29..1741273ac34c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -226,8 +226,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                 /* Search one past newbrk */
                 mas_set(&mas, newbrk);
                 brkvma = mas_find(&mas, oldbrk);
-                BUG_ON(brkvma == NULL);
-                if (brkvma->vm_start >= oldbrk)
+                if (!brkvma || brkvma->vm_start >= oldbrk)
                         goto out; /* mapping intersects with an existing non-brk vma. */
                 /*
                  * mm->brk must be protected by write mmap_lock.
@@ -2949,9 +2948,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
          * Expand the existing vma if possible; Note that singular lists do not
          * occur after forking, so the expand will only happen on new VMAs.
          */
-        if (vma &&
-            (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
-            ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+        if (vma && vma->vm_end == addr && !vma_policy(vma) &&
+            can_vma_merge_after(vma, flags, NULL, NULL,
+                                addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
                 mas_set_range(mas, vma->vm_start, addr + len - 1);
                 if (mas_preallocate(mas, vma, GFP_KERNEL))
                         return -ENOMEM;
@@ -3038,11 +3037,6 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
                 goto munmap_failed;
 
         vma = mas_prev(&mas, 0);
-        if (!vma || vma->vm_end != addr || vma_policy(vma) ||
-            !can_vma_merge_after(vma, flags, NULL, NULL,
-                                 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
-                vma = NULL;
-
         ret = do_brk_flags(&mas, vma, addr, len, flags);
         populate = ((mm->def_flags & VM_LOCKED) != 0);
         mmap_write_unlock(mm);
diff --git a/mm/shmem.c b/mm/shmem.c
index f418d21205be..17adb7f6f6e4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -950,6 +950,15 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                 cond_resched();
         }
 
+        /*
+         * When undoing a failed fallocate, we want none of the partial folio
+         * zeroing and splitting below, but shall want to truncate the whole
+         * folio when !uptodate indicates that it was added by this fallocate,
+         * even when [lstart, lend] covers only a part of the folio.
+         */
+        if (unfalloc)
+                goto whole_folios;
+
         same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
         folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
         if (folio) {
@@ -975,6 +984,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                         folio_put(folio);
         }
 
+whole_folios:
+
         index = start;
         while (index < end) {
                 cond_resched();
diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
index 22b31ebb3513..258ddc565deb 100644
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -19,12 +19,12 @@
 
 
 /*
- * Memory cgroup charging is performed using percpu batches 32 pages
+ * Memory cgroup charging is performed using percpu batches 64 pages
  * big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
  * the maximum discrepancy between charge and vmstat entries is number
- * of cpus multiplied by 32 pages.
+ * of cpus multiplied by 64 pages.
  */
-#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
+#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())
 
 
 static int alloc_dcache(const char *cgroup, void *arg)
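The mm/memcontrol.c hunks above harden memcg_write_event_control(), which implements the cgroup v1 memory threshold notification interface: userspace passes an eventfd plus a file descriptor for a memcg control file via cgroup.event_control, and the added check rejects a cfd that does not refer to a regular file on cgroupfs before the dentry name is used. The following is a minimal userspace sketch of that protocol; the cgroup mount path and the 1 MiB threshold are illustrative assumptions, not part of the patch.

/*
 * Userspace sketch of the cgroup1 event_control protocol parsed by
 * memcg_write_event_control(). The memcg path "demo" and the threshold
 * are illustrative assumptions. With the fix above, passing a cfd that
 * is not a regular cgroupfs file now fails with -EINVAL.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
        int efd = eventfd(0, 0);        /* event_fd the kernel signals */
        int cfd = open("/sys/fs/cgroup/memory/demo/memory.usage_in_bytes", O_RDONLY);
        int ctl = open("/sys/fs/cgroup/memory/demo/cgroup.event_control", O_WRONLY);
        char buf[64];
        uint64_t ticks;

        if (efd < 0 || cfd < 0 || ctl < 0) {
                perror("open");
                return 1;
        }

        /* "<event_fd> <fd of control file> <threshold in bytes>" */
        snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, 1ULL << 20);
        if (write(ctl, buf, strlen(buf)) < 0) {
                perror("event_control");
                return 1;
        }

        /* read() blocks until memory usage crosses the 1 MiB threshold */
        read(efd, &ticks, sizeof(ticks));
        printf("threshold crossed %llu time(s)\n", (unsigned long long)ticks);
        return 0;
}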