author	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-27 10:27:22 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-27 10:27:22 -0700
commit	eee280841e1c8188fe9af5536c193d07d184e874 (patch)
tree	65cca9e55a5e4553463e50f1a11fccf28f75af75 /mm
parent	3630400697a3d334a391c1dba1b601d852145f2c (diff)
parent	2af148ef8549a12f8025286b8825c2833ee6bcb8 (diff)
Merge tag 'mm-hotfixes-stable-2024-09-27-09-45' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "19 hotfixes. 13 are cc:stable. There's a focus on fixes for the
  memfd_pin_folios() work which was added into 6.11. Apart from that,
  the usual shower of singleton fixes"

* tag 'mm-hotfixes-stable-2024-09-27-09-45' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  ocfs2: fix uninit-value in ocfs2_get_block()
  zram: don't free statically defined names
  memory tiers: use default_dram_perf_ref_source in log message
  Revert "list: test: fix tests for list_cut_position()"
  kselftests: mm: fix wrong __NR_userfaultfd value
  compiler.h: specify correct attribute for .rodata..c_jump_table
  mm/damon/Kconfig: update DAMON doc URL
  mm: kfence: fix elapsed time for allocated/freed track
  ocfs2: fix deadlock in ocfs2_get_system_file_inode
  ocfs2: reserve space for inline xattr before attaching reflink tree
  mm: migrate: annotate data-race in migrate_folio_unmap()
  mm/hugetlb: simplify refs in memfd_alloc_folio
  mm/gup: fix memfd_pin_folios alloc race panic
  mm/gup: fix memfd_pin_folios hugetlb page allocation
  mm/hugetlb: fix memfd_pin_folios resv_huge_pages leak
  mm/hugetlb: fix memfd_pin_folios free_huge_pages leak
  mm/filemap: fix filemap_get_folios_contig THP panic
  mm: make SPLIT_PTE_PTLOCKS depend on SMP
  tools: fix shared radix-tree build
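Several of these fixes target the memfd_pin_folios() path, the kernel-internal API (added in 6.11 for udmabuf) that pins the folios backing a byte range of a memfd. For orientation, a minimal sketch of a caller; the helper name is hypothetical and the inclusive-end convention is an assumption taken from the in-tree udmabuf user:

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sizes.h>

/* Hypothetical helper: pin the folios backing the first 2 MB of a memfd. */
static long pin_first_2m(struct file *memfd, struct folio **folios,
			 unsigned int max_folios)
{
	pgoff_t offset;	/* offset of the first folio, filled in on success */
	long nr;

	/* start/end are byte offsets; end is treated as inclusive here */
	nr = memfd_pin_folios(memfd, 0, SZ_2M - 1, folios, max_folios, &offset);
	if (nr < 0)
		return nr;	/* -EINVAL, -ENOMEM, ... */

	/* ... use the pinned folios ... */
	unpin_folios(folios, nr);
	return 0;
}
```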
Diffstat (limited to 'mm')
 mm/Kconfig         |  1 +
 mm/damon/Kconfig   |  2 +-
 mm/filemap.c       |  4 ++++
 mm/gup.c           |  1 +
 mm/hugetlb.c       | 17 +++++++++++++++++
 mm/kfence/report.c |  2 +-
 mm/memfd.c         | 18 ++++++++++--------
 mm/memory-tiers.c  |  6 +++---
 mm/migrate.c       |  2 +-
 9 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 09aebca1cae3..4c9f5ea13271 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -595,6 +595,7 @@ config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
config SPLIT_PTE_PTLOCKS
def_bool y
depends on MMU
+ depends on SMP
depends on NR_CPUS >= 4
depends on !ARM || CPU_CACHE_VIPT
depends on !PARISC || PA20
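For context, SPLIT_PTE_PTLOCKS selects a spinlock per page-table page instead of the single mm->page_table_lock, which only pays off on SMP. Consumers are unchanged either way; a sketch of the lock-taking pattern, with pte_is_present() a hypothetical helper:

```c
#include <linux/mm.h>

/*
 * With SPLIT_PTE_PTLOCKS the lock returned here is per page-table
 * page; without it, it is the shared mm->page_table_lock.
 */
static bool pte_is_present(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr)
{
	spinlock_t *ptl;
	bool ret = false;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	if (pte) {
		ret = pte_present(ptep_get(pte));
		pte_unmap_unlock(pte, ptl);
	}
	return ret;
}
```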
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index fecb8172410c..35b72f88983a 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -9,7 +9,7 @@ config DAMON
access frequency of each memory region. The information can be useful
for performance-centric DRAM level memory management.
- See https://damonitor.github.io/doc/html/latest-damon/index.html for
+ See https://www.kernel.org/doc/html/latest/mm/damon/index.html for
more information.
config DAMON_KUNIT_TEST
diff --git a/mm/filemap.c b/mm/filemap.c
index bbaed3dd5049..36d22968be9a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2196,6 +2196,10 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
if (xa_is_value(folio))
goto update_start;
+ /* If we landed in the middle of a THP, continue at its end. */
+ if (xa_is_sibling(folio))
+ goto update_start;
+
if (!folio_try_get(folio))
goto retry;
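The panic came from treating an XArray sibling entry as a folio: a THP occupies a multi-index entry in the page cache, and a lookup that starts inside that range can return a sibling slot rather than the head. A reduced sketch of the entry classification, with entry_is_real_folio() a hypothetical helper:

```c
#include <linux/xarray.h>

/*
 * Only a plain pointer entry is safe to treat as a folio; value and
 * sibling entries must be skipped, not dereferenced.
 */
static inline bool entry_is_real_folio(const void *entry)
{
	/* value entries encode shadow/swap state, not a folio pointer */
	if (xa_is_value(entry))
		return false;
	/* sibling entries point back at the head of a multi-index entry */
	if (xa_is_sibling(entry))
		return false;
	return true;
}
```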
diff --git a/mm/gup.c b/mm/gup.c
index 8232c8c9c372..a82890b46a36 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -3700,6 +3700,7 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
ret = PTR_ERR(folio);
if (ret != -EEXIST)
goto err;
+ folio = NULL;
}
}
}
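The one-line fix matters because folio still holds an ERR_PTR after a failed allocation; on the -EEXIST retry path (another task already added the page) the stale pointer could later be dereferenced. A reduced sketch of the pattern, with names hypothetical:

```c
#include <linux/err.h>
#include <linux/errno.h>

struct folio;

/* Hypothetical retry step reduced from memfd_pin_folios(). */
static long lookup_or_alloc(struct folio *(*alloc)(void), struct folio **out)
{
	struct folio *folio = *out;
	long ret;

	if (!folio) {
		folio = alloc();
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			if (ret != -EEXIST)
				return ret;
			/* Someone else added the page: retry the lookup,
			 * but never keep the ERR_PTR around. */
			folio = NULL;
		}
	}
	*out = folio;	/* NULL or a real folio, never an ERR_PTR */
	return 0;
}
```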
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index def84d8bcf2d..190fa05635f4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2390,6 +2390,23 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
return folio;
}
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
+{
+ struct folio *folio;
+
+ spin_lock_irq(&hugetlb_lock);
+ folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
+ nmask);
+ if (folio) {
+ VM_BUG_ON(!h->resv_huge_pages);
+ h->resv_huge_pages--;
+ }
+
+ spin_unlock_irq(&hugetlb_lock);
+ return folio;
+}
+
/* folio migration callback function */
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
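Unlike alloc_hugetlb_folio_nodemask() below it, the new helper only dequeues a folio that a reservation already guarantees and never falls back to allocating a fresh page, which is what keeps resv_huge_pages balanced for pinned memfds. A hedged sketch of the intended call pattern (the gfp handling mirrors the memfd caller and is an assumption):

```c
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/topology.h>

/* Hypothetical caller: take one already-reserved huge folio, if any. */
static struct folio *grab_reserved_folio(struct hstate *h)
{
	/* pinned memory should be neither highmem nor movable */
	gfp_t gfp = htlb_alloc_mask(h) & ~(__GFP_HIGHMEM | __GFP_MOVABLE);

	/* NULL simply means the reserve is exhausted */
	return alloc_hugetlb_folio_reserve(h, numa_node_id(), NULL, gfp);
}
```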
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index 451991a3a8f2..6370c5207d1a 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -109,7 +109,7 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat
const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
u64 ts_sec = track->ts_nsec;
unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
- u64 interval_nsec = local_clock() - meta->alloc_track.ts_nsec;
+ u64 interval_nsec = local_clock() - track->ts_nsec;
unsigned long rem_interval_nsec = do_div(interval_nsec, NSEC_PER_SEC);
/* Timestamp matches printk timestamp format. */
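The bug was cosmetic but confusing: the elapsed time was always computed against the allocation track, so the "freed" report showed the wrong interval. The formatting idiom itself, do_div() splitting a u64 nanosecond count in place, looks like this minimal sketch (print_elapsed() is hypothetical):

```c
#include <asm/div64.h>
#include <linux/ktime.h>
#include <linux/printk.h>
#include <linux/sched/clock.h>

/* do_div() divides the u64 in place and returns the remainder,
 * giving the "sec.nsec" split that matches printk timestamps. */
static void print_elapsed(u64 then_nsec)
{
	u64 interval_nsec = local_clock() - then_nsec;
	unsigned long rem_nsec = do_div(interval_nsec, NSEC_PER_SEC);

	pr_info("event was %llu.%09lus ago\n", interval_nsec, rem_nsec);
}
```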
diff --git a/mm/memfd.c b/mm/memfd.c
index e7b7c5294d59..c17c3ea701a1 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -79,23 +79,25 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
* alloc from. Also, the folio will be pinned for an indefinite
* amount of time, so it is not expected to be migrated away.
*/
- gfp_mask = htlb_alloc_mask(hstate_file(memfd));
+ struct hstate *h = hstate_file(memfd);
+
+ gfp_mask = htlb_alloc_mask(h);
gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
+ idx >>= huge_page_order(h);
- folio = alloc_hugetlb_folio_nodemask(hstate_file(memfd),
- numa_node_id(),
- NULL,
- gfp_mask,
- false);
- if (folio && folio_try_get(folio)) {
+ folio = alloc_hugetlb_folio_reserve(h,
+ numa_node_id(),
+ NULL,
+ gfp_mask);
+ if (folio) {
err = hugetlb_add_to_page_cache(folio,
memfd->f_mapping,
idx);
if (err) {
folio_put(folio);
- free_huge_folio(folio);
return ERR_PTR(err);
}
+ folio_unlock(folio);
return folio;
}
return ERR_PTR(-ENOMEM);
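Two things change here: the folio now arrives from the reserve-only helper above holding a reference of its own, so the folio_try_get()/free_huge_folio() dance goes away, and the page-cache index is scaled down because the hugetlb page cache is indexed in huge-page units, not PAGE_SIZE units. A sketch of that conversion (the helper name is hypothetical):

```c
#include <linux/hugetlb.h>
#include <linux/pagemap.h>

/* A PAGE_SIZE-based pgoff must be scaled to huge-page units before
 * it is used with hugetlb_add_to_page_cache(). */
static pgoff_t hugetlb_cache_index(struct hstate *h, pgoff_t pgoff)
{
	/* e.g. a 2 MB hstate on x86-64 has huge_page_order() == 9 */
	return pgoff >> huge_page_order(h);
}
```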
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 9842acebd05e..fc14fe53e9b7 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -768,10 +768,10 @@ int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
pr_info(
"memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
"DRAM node %d.\n", nid, default_dram_perf_ref_nid);
- pr_info(" performance of reference DRAM node %d:\n",
- default_dram_perf_ref_nid);
+ pr_info(" performance of reference DRAM node %d from %s:\n",
+ default_dram_perf_ref_nid, default_dram_perf_ref_source);
dump_hmem_attrs(&default_dram_perf, " ");
- pr_info(" performance of DRAM node %d:\n", nid);
+ pr_info(" performance of DRAM node %d from %s:\n", nid, source);
dump_hmem_attrs(perf, " ");
pr_info(
" disable default DRAM node performance based abstract distance algorithm.\n");
diff --git a/mm/migrate.c b/mm/migrate.c
index dfdb3a136bf8..df91248755e4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1196,7 +1196,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
int rc = -EAGAIN;
int old_page_state = 0;
struct anon_vma *anon_vma = NULL;
- bool is_lru = !__folio_test_movable(src);
+ bool is_lru = data_race(!__folio_test_movable(src));
bool locked = false;
bool dst_locked = false;
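data_race() from <linux/compiler.h> marks an intentionally lockless read so KCSAN does not warn about it; correctness must not depend on the value being stable. A minimal sketch of the annotation (racy_flag/peek_flag are hypothetical):

```c
#include <linux/compiler.h>

/* Hypothetical flag written concurrently elsewhere. */
static int racy_flag;

/* data_race() documents that this lockless read is intentional and
 * silences KCSAN; the caller must tolerate a stale value. */
static bool peek_flag(void)
{
	return data_race(racy_flag) != 0;
}
```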