Diffstat (limited to 'mm/truncate.c')
 mm/truncate.c | 70 ++++++++++++++++++++++++++++++++--------------------------------------
 1 file changed, 32 insertions(+), 38 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index 581977d2356f..4d61fbdd4b2f 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -39,12 +39,25 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
xas_store(&xas, NULL);
}
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
- void *entry)
+static void clear_shadow_entries(struct address_space *mapping,
+ struct folio_batch *fbatch, pgoff_t *indices)
{
+ int i;
+
+ /* Handled by shmem itself, or for DAX we do nothing. */
+ if (shmem_mapping(mapping) || dax_mapping(mapping))
+ return;
+
spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
- __clear_shadow_entry(mapping, index, entry);
+
+ for (i = 0; i < folio_batch_count(fbatch); i++) {
+ struct folio *folio = fbatch->folios[i];
+
+ if (xa_is_value(folio))
+ __clear_shadow_entry(mapping, indices[i], folio);
+ }
+
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
inode_add_lru(mapping->host);
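This hunk turns the single-entry helper into a batch operation: one i_lock/i_pages lock round trip now covers every shadow entry in the folio_batch rather than one trip per entry. A minimal sketch of the caller pattern it enables (example_invalidate_batch() is a hypothetical name for illustration; the real call sites appear in the hunks below):

static void example_invalidate_batch(struct address_space *mapping,
				     struct folio_batch *fbatch,
				     pgoff_t *indices)
{
	bool xa_has_values = false;
	int i;

	/* First pass: note value (shadow) entries but defer clearing. */
	for (i = 0; i < folio_batch_count(fbatch); i++)
		if (xa_is_value(fbatch->folios[i]))
			xa_has_values = true;

	/* Second pass: clear them all under a single lock acquisition. */
	if (xa_has_values)
		clear_shadow_entries(mapping, fbatch, indices);
}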
@@ -105,36 +118,6 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
fbatch->nr = j;
}
-/*
- * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages().
- */
-static int invalidate_exceptional_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
-{
- /* Handled by shmem itself, or for DAX we do nothing. */
- if (shmem_mapping(mapping) || dax_mapping(mapping))
- return 1;
- clear_shadow_entry(mapping, index, entry);
- return 1;
-}
-
-/*
- * Invalidate exceptional entry if clean. This handles exceptional entries for
- * invalidate_inode_pages2() so for DAX it evicts only clean entries.
- */
-static int invalidate_exceptional_entry2(struct address_space *mapping,
- pgoff_t index, void *entry)
-{
- /* Handled by shmem itself */
- if (shmem_mapping(mapping))
- return 1;
- if (dax_mapping(mapping))
- return dax_invalidate_mapping_entry_sync(mapping, index);
- clear_shadow_entry(mapping, index, entry);
- return 1;
-}
-
/**
* folio_invalidate - Invalidate part or all of a folio.
* @folio: The folio which is affected.
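Of the two helpers removed above, invalidate_exceptional_entry() returned 1 unconditionally, so callers' accounting is unaffected by its removal; its shmem/DAX filtering now lives at the top of clear_shadow_entries(). The one behavior that differed, invalidate_exceptional_entry2()'s synchronous DAX invalidation, is inlined at its only call site in a later hunk.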
@@ -495,6 +478,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
unsigned long ret;
unsigned long count = 0;
int i;
+ bool xa_has_values = false;
folio_batch_init(&fbatch);
while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
@@ -504,8 +488,8 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
/* We rely upon deletion not changing folio->index */
if (xa_is_value(folio)) {
- count += invalidate_exceptional_entry(mapping,
- indices[i], folio);
+ xa_has_values = true;
+ count++;
continue;
}
@@ -523,6 +507,10 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
}
count += ret;
}
+
+ if (xa_has_values)
+ clear_shadow_entries(mapping, &fbatch, indices);
+
folio_batch_remove_exceptionals(&fbatch);
folio_batch_release(&fbatch);
cond_resched();
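Note that count++ above is unconditional: the old invalidate_exceptional_entry() also returned 1 for shmem and DAX mappings even though nothing was cleared there, and clear_shadow_entries() preserves that by silently skipping them, so mapping_try_invalidate()'s return value is unchanged. Only the clearing itself is deferred to after the loop, letting the whole batch share one lock round trip.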
@@ -555,7 +543,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
* This is like mapping_evict_folio(), except it ignores the folio's
* refcount. We do this because invalidate_inode_pages2() needs stronger
* invalidation guarantees, and cannot afford to leave folios behind because
- * shrink_page_list() has a temp ref on them, or because they're transiently
+ * shrink_folio_list() has a temp ref on them, or because they're transiently
* sitting in the folio_add_lru() caches.
*/
static int invalidate_complete_folio2(struct address_space *mapping,
@@ -617,6 +605,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
int ret = 0;
int ret2 = 0;
int did_range_unmap = 0;
+ bool xa_has_values = false;
if (mapping_empty(mapping))
return 0;
@@ -630,8 +619,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
/* We rely upon deletion not changing folio->index */
if (xa_is_value(folio)) {
- if (!invalidate_exceptional_entry2(mapping,
- indices[i], folio))
+ xa_has_values = true;
+ if (dax_mapping(mapping) &&
+ !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
ret = -EBUSY;
continue;
}
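Only DAX can fail at this point: dax_invalidate_mapping_entry_sync() refuses to evict dirty DAX entries, and that failure is reported as -EBUSY, exactly as the 0 return from invalidate_exceptional_entry2() used to be. Shmem and shadow entries are still just flagged here and cleared in batch after the loop.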
@@ -667,6 +657,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
ret = ret2;
folio_unlock(folio);
}
+
+ if (xa_has_values)
+ clear_shadow_entries(mapping, &fbatch, indices);
+
folio_batch_remove_exceptionals(&fbatch);
folio_batch_release(&fbatch);
cond_resched();
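Net effect: for a batch containing N exceptional entries, the spin_lock(&mapping->host->i_lock)/xa_lock_irq(&mapping->i_pages) pair is taken once instead of N times, so a full folio_batch (PAGEVEC_SIZE, 15 entries at the time of this change) drops from up to fifteen lock round trips to one.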