author		Matthew Wilcox (Oracle) <willy@infradead.org>	2022-02-09 20:22:12 +0000
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-03-16 13:37:04 -0400
commit		e621900ad28b748e058b81d6078a5d5eb37b3973
tree		8c40fb48fbae51a8823e6038da522faccec82be1	/fs/buffer.c
parent		af7afdc7bbbe60cb6ce51a86b022d647e1a72717
fs: Convert __set_page_dirty_buffers to block_dirty_folio
Convert all callers; mostly this is just a matter of changing each
filesystem's address_space_operations to point at it (sketched after the
tags below), but a few implementations need a little more work.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
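
To illustrate the common "just changing the aops" case from the message
above, here is a minimal sketch of a caller conversion. "examplefs" and
its other methods are hypothetical; .dirty_folio and block_dirty_folio
are the names this series actually introduces:

static const struct address_space_operations examplefs_aops = {
	/* Before this series: .set_page_dirty = __set_page_dirty_buffers, */
	.dirty_folio	= block_dirty_folio,
	/* ...readpage, writepage, etc. are unchanged by this commit... */
};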
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	33
1 file changed, 15 insertions, 18 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 5fe02e5a9807..28b9739b719b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
  * address_space though.
  */
-int __set_page_dirty_buffers(struct page *page)
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-	int newly_dirty;
-	struct address_space *mapping = page_mapping(page);
-
-	if (unlikely(!mapping))
-		return !TestSetPageDirty(page);
+	struct buffer_head *head;
+	bool newly_dirty;
 
 	spin_lock(&mapping->private_lock);
-	if (page_has_buffers(page)) {
-		struct buffer_head *head = page_buffers(page);
+	head = folio_buffers(folio);
+	if (head) {
 		struct buffer_head *bh = head;
 
 		do {
@@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page)
 	 * Lock out page's memcg migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	lock_page_memcg(page);
-	newly_dirty = !TestSetPageDirty(page);
+	folio_memcg_lock(folio);
+	newly_dirty = !folio_test_set_dirty(folio);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
-		__set_page_dirty(page, mapping, 1);
+		__folio_mark_dirty(folio, mapping, 1);
 
-	unlock_page_memcg(page);
+	folio_memcg_unlock(folio);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
 	return newly_dirty;
 }
-EXPORT_SYMBOL(__set_page_dirty_buffers);
+EXPORT_SYMBOL(block_dirty_folio);
 
 /*
  * Write out and wait upon a list of buffers.
@@ -1548,7 +1545,7 @@ EXPORT_SYMBOL(block_invalidate_folio);
 
 /*
  * We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
+ * block_dirty_folio() via private_lock.  try_to_free_buffers
  * is already excluded via the page lock.
  */
 void create_empty_buffers(struct page *page,
@@ -1723,12 +1720,12 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 			(1 << BH_Dirty)|(1 << BH_Uptodate));
 
 	/*
-	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+	 * Be very careful.  We have no exclusion from block_dirty_folio
 	 * here, and the (potentially unmapped) buffers may become dirty at
 	 * any time.  If a buffer becomes dirty here after we've inspected it
 	 * then we just miss that fact, and the page stays dirty.
 	 *
-	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
	 */
 
@@ -3182,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
  *
  * The same applies to regular filesystem pages: if all the buffers are
  * clean then we set the page clean and proceed.  To do that, we require
- * total exclusion from __set_page_dirty_buffers().  That is obtained with
+ * total exclusion from block_dirty_folio().  That is obtained with
  * private_lock.
  *
  * try_to_free_buffers() is non-blocking.
@@ -3249,7 +3246,7 @@ int try_to_free_buffers(struct page *page)
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
-	 * to synchronise against __set_page_dirty_buffers and prevent the
+	 * to synchronise against block_dirty_folio and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
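
For context on the new signature: ->dirty_folio receives the
address_space explicitly because the caller has already looked it up and
handled the unmapped case, which is why the old "if (unlikely(!mapping))"
check is dropped. A simplified sketch of that dispatch, assuming the
folio_mark_dirty() path in mm/page-writeback.c (the real function does a
little more work, e.g. clearing the reclaim flag):

bool folio_mark_dirty_sketch(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	/* Mapped folios are dirtied through the aops hook, so
	 * block_dirty_folio() never sees a NULL mapping. */
	if (likely(mapping))
		return mapping->a_ops->dirty_folio(mapping, folio);

	/* Unmapped folios fall back to setting the flag directly. */
	return !folio_test_set_dirty(folio);
}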