author     David Howells <dhowells@redhat.com>   2024-03-19 10:00:09 +0000
committer  David Howells <dhowells@redhat.com>   2024-04-29 15:01:42 +0100
commit     2ff1e97587f4d398686f52c07afde3faf3da4e5c (patch)
tree       a25925bb777929c20020df7af7eec696c61c0cce /fs/netfs
parent     5f24162f873f08681804059e6de70d77c3e4cea2 (diff)
netfs: Replace PG_fscache by setting folio->private and marking dirty
When dirty data is being written to the cache, setting/waiting on/clearing
the fscache flag is always done in tandem with setting/waiting on/clearing
the writeback flag. The netfslib buffered write routines wait on and set
both flags and the write request cleanup clears both flags, so the fscache
flag is almost superfluous.
The reason it isn't entirely superfluous is that the fscache flag is also used to
indicate that data just read from the server is being written to the cache.
The flag is used to prevent a race involving overlapping direct-I/O writes
to the cache.
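For context, the tandem pattern being replaced looks roughly like this in the
buffered write path (a sketch based on the removals in buffered_write.c below;
old_start_folio_write is a hypothetical name and "caching" mirrors the old
local variable):

	/* Sketch of the old scheme: PG_writeback and PG_fscache were always
	 * driven together. */
	static void old_start_folio_write(struct folio *folio, bool caching)
	{
		folio_wait_fscache(folio);	/* wait out a previous cache write */
		folio_start_writeback(folio);
		if (caching)
			folio_start_fscache(folio);
	}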
Change this to indicate that a folio needs copying to the cache by placing a
magic value in folio->private and marking the folio dirty.
Then when the writeback code sees a folio marked in this way, it only
writes it to the cache and not to the server.
If a folio carrying this magic value is modified, the value is simply
replaced and the folio will then be uploaded too.
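To make that concrete, here is a minimal sketch of the sentinel scheme; the
numeric value of NETFS_FOLIO_COPY_TO_CACHE shown here is a placeholder (the
real definition lives in the netfs headers) and the example_* helpers are
hypothetical:

	/* Placeholder for the real sentinel defined in the netfs headers. */
	#define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL)

	/* Mark a folio just read from the server so that writeback copies it
	 * to the cache without uploading it to the server. */
	static void example_mark_copy_to_cache(struct folio *folio)
	{
		folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
		filemap_dirty_folio(folio->mapping, folio);
	}

	/* At writeback time, the folio's group selects the request origin. */
	static enum netfs_io_origin example_origin(struct netfs_group *group)
	{
		return group == NETFS_FOLIO_COPY_TO_CACHE ?
			NETFS_COPY_TO_CACHE : NETFS_WRITEBACK;
	}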
With this, PG_fscache is no longer required by the netfslib core, 9p and
afs.
Ceph and nfs, however, still need to use the old PG_fscache-based tracking.
To deal with this, a flag, NETFS_ICTX_USE_PGPRIV2, now has to be set in the
netfs_inode struct's flags for those filesystems. This re-enables the use of
PG_fscache for that inode. 9p and afs use the netfslib write helpers
so get switched over; cifs, for the moment, does page-by-page manual access
to the cache, so doesn't use PG_fscache and is unaffected.
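As a sketch (not part of this patch): a filesystem that still depends on the
PG_private_2-based tracking would opt back in when it initialises its
netfs_inode, along these lines (example_init_netfs_inode is a hypothetical
helper):

	/* Opt an inode back into the deprecated PG_private_2 tracking after
	 * the usual netfs inode initialisation. */
	static void example_init_netfs_inode(struct netfs_inode *ictx)
	{
		/* ...netfs_inode_init() and other setup... */
		__set_bit(NETFS_ICTX_USE_PGPRIV2, &ictx->flags);
	}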
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Matthew Wilcox (Oracle) <willy@infradead.org>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Xiubo Li <xiubli@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Paulo Alcantara <pc@manguebit.com>
cc: Ronnie Sahlberg <ronniesahlberg@gmail.com>
cc: Shyam Prasad N <sprasad@microsoft.com>
cc: Tom Talpey <tom@talpey.com>
cc: Bharath SM <bharathsm@microsoft.com>
cc: Trond Myklebust <trond.myklebust@hammerspace.com>
cc: Anna Schumaker <anna@kernel.org>
cc: netfs@lists.linux.dev
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: ceph-devel@vger.kernel.org
cc: linux-cifs@vger.kernel.org
cc: linux-nfs@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
Diffstat (limited to 'fs/netfs')
-rw-r--r-- | fs/netfs/buffered_read.c  | 36
-rw-r--r-- | fs/netfs/buffered_write.c | 93
-rw-r--r-- | fs/netfs/fscache_io.c     | 12
-rw-r--r-- | fs/netfs/internal.h       | 10
-rw-r--r-- | fs/netfs/io.c             | 18
-rw-r--r-- | fs/netfs/main.c           |  1
-rw-r--r-- | fs/netfs/misc.c           | 10
-rw-r--r-- | fs/netfs/objects.c        |  6
8 files changed, 102 insertions, 84 deletions
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 3298c29b5548..6d49319c82c6 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -10,8 +10,11 @@
 #include "internal.h"
 
 /*
- * Unlock the folios in a read operation.  We need to set PG_fscache on any
+ * Unlock the folios in a read operation.  We need to set PG_writeback on any
  * folios we're going to write back before we unlock them.
+ *
+ * Note that if the deprecated NETFS_RREQ_USE_PGPRIV2 is set then we use
+ * PG_private_2 and do a direct write to the cache from here instead.
  */
 void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 {
@@ -48,14 +51,14 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 	xas_for_each(&xas, folio, last_page) {
 		loff_t pg_end;
 		bool pg_failed = false;
-		bool folio_started;
+		bool wback_to_cache = false;
+		bool folio_started = false;
 
 		if (xas_retry(&xas, folio))
 			continue;
 
 		pg_end = folio_pos(folio) + folio_size(folio) - 1;
-		folio_started = false;
 
 		for (;;) {
 			loff_t sreq_end;
@@ -63,10 +66,16 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 				pg_failed = true;
 				break;
 			}
-			if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
-				trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
-				folio_start_fscache(folio);
-				folio_started = true;
+			if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
+				if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE,
+							       &subreq->flags)) {
+					trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+					folio_start_fscache(folio);
+					folio_started = true;
+				}
+			} else {
+				wback_to_cache |=
+					test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 			}
 			pg_failed |= subreq_failed;
 			sreq_end = subreq->start + subreq->len - 1;
@@ -98,6 +107,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 				kfree(finfo);
 			}
 			folio_mark_uptodate(folio);
+			if (wback_to_cache && !WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
+				trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+				folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
+				filemap_dirty_folio(folio->mapping, folio);
+			}
 		}
 
 		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
@@ -491,9 +505,11 @@ retry:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
-	ret = folio_wait_fscache_killable(folio);
-	if (ret < 0)
-		goto error;
+	if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) {
+		ret = folio_wait_fscache_killable(folio);
+		if (ret < 0)
+			goto error;
+	}
 have_folio_no_wait:
 	*_folio = folio;
 	_leave(" = 0");
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index f7455a579f21..57c6eab01261 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -30,21 +30,13 @@ static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
 
 static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
 {
-	if (netfs_group && !folio_get_private(folio))
-		folio_attach_private(folio, netfs_get_group(netfs_group));
-}
+	void *priv = folio_get_private(folio);
 
-#if IS_ENABLED(CONFIG_FSCACHE)
-static void netfs_folio_start_fscache(bool caching, struct folio *folio)
-{
-	if (caching)
-		folio_start_fscache(folio);
-}
-#else
-static void netfs_folio_start_fscache(bool caching, struct folio *folio)
-{
+	if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
+		folio_attach_private(folio, netfs_get_group(netfs_group));
+	else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
+		folio_detach_private(folio);
 }
-#endif
 
 /*
  * Decide how we should modify a folio.  We might be attempting to do
@@ -63,11 +55,12 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
 						    bool maybe_trouble)
 {
 	struct netfs_folio *finfo = netfs_folio_info(folio);
+	struct netfs_group *group = netfs_folio_group(folio);
 	loff_t pos = folio_file_pos(folio);
 
 	_enter("");
 
-	if (netfs_folio_group(folio) != netfs_group)
+	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
 		return NETFS_FLUSH_CONTENT;
 
 	if (folio_test_uptodate(folio))
@@ -396,9 +389,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			folio_clear_dirty_for_io(folio);
 			/* We make multiple writes to the folio... */
 			if (!folio_test_writeback(folio)) {
-				folio_wait_fscache(folio);
 				folio_start_writeback(folio);
-				folio_start_fscache(folio);
 				if (wreq->iter.count == 0)
 					trace_netfs_folio(folio, netfs_folio_trace_wthru);
 				else
@@ -528,6 +519,7 @@ EXPORT_SYMBOL(netfs_file_write_iter);
  */
 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
 {
+	struct netfs_group *group;
 	struct folio *folio = page_folio(vmf->page);
 	struct file *file = vmf->vma->vm_file;
 	struct inode *inode = file_inode(file);
@@ -550,7 +542,8 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_gr
 		goto out;
 	}
 
-	if (netfs_folio_group(folio) != netfs_group) {
+	group = netfs_folio_group(folio);
+	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
 		folio_unlock(folio);
 		err = filemap_fdatawait_range(inode->i_mapping,
 					      folio_pos(folio),
@@ -606,8 +599,6 @@ static void netfs_kill_pages(struct address_space *mapping,
 		trace_netfs_folio(folio, netfs_folio_trace_kill);
 		folio_clear_uptodate(folio);
-		if (folio_test_fscache(folio))
-			folio_end_fscache(folio);
 		folio_end_writeback(folio);
 		folio_lock(folio);
 		generic_error_remove_folio(mapping, folio);
@@ -643,8 +634,6 @@ static void netfs_redirty_pages(struct address_space *mapping,
 		next = folio_next_index(folio);
 		trace_netfs_folio(folio, netfs_folio_trace_redirty);
 		filemap_dirty_folio(mapping, folio);
-		if (folio_test_fscache(folio))
-			folio_end_fscache(folio);
 		folio_end_writeback(folio);
 		folio_put(folio);
 	} while (index = next, index <= last);
@@ -700,7 +689,11 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
 			if (!folio_test_dirty(folio)) {
 				folio_detach_private(folio);
 				gcount++;
-				trace_netfs_folio(folio, netfs_folio_trace_clear_g);
+				if (group == NETFS_FOLIO_COPY_TO_CACHE)
+					trace_netfs_folio(folio,
+							  netfs_folio_trace_end_copy);
+				else
+					trace_netfs_folio(folio, netfs_folio_trace_clear_g);
 			} else {
 				trace_netfs_folio(folio, netfs_folio_trace_redirtied);
 			}
@@ -724,8 +717,6 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
 			trace_netfs_folio(folio, netfs_folio_trace_clear);
 		}
 	end_wb:
-		if (folio_test_fscache(folio))
-			folio_end_fscache(folio);
 		xas_advance(&xas, folio_next_index(folio) - 1);
 		folio_end_writeback(folio);
 	}
@@ -795,7 +786,6 @@ static void netfs_extend_writeback(struct address_space *mapping,
 				   long *_count,
 				   loff_t start,
 				   loff_t max_len,
-				   bool caching,
 				   size_t *_len,
 				   size_t *_top)
 {
@@ -846,8 +836,7 @@ static void netfs_extend_writeback(struct address_space *mapping,
 				break;
 			}
 			if (!folio_test_dirty(folio) ||
-			    folio_test_writeback(folio) ||
-			    folio_test_fscache(folio)) {
+			    folio_test_writeback(folio)) {
 				folio_unlock(folio);
 				folio_put(folio);
 				xas_reset(xas);
@@ -860,7 +849,8 @@ static void netfs_extend_writeback(struct address_space *mapping,
 			if ((const struct netfs_group *)priv != group) {
 				stop = true;
 				finfo = netfs_folio_info(folio);
-				if (finfo->netfs_group != group ||
+				if (!finfo ||
+				    finfo->netfs_group != group ||
 				    finfo->dirty_offset > 0) {
 					folio_unlock(folio);
 					folio_put(folio);
@@ -894,12 +884,14 @@ static void netfs_extend_writeback(struct address_space *mapping,
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			folio = fbatch.folios[i];
-			trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+			if (group == NETFS_FOLIO_COPY_TO_CACHE)
+				trace_netfs_folio(folio, netfs_folio_trace_copy_plus);
+			else
+				trace_netfs_folio(folio, netfs_folio_trace_store_plus);
 
 			if (!folio_clear_dirty_for_io(folio))
 				BUG();
 			folio_start_writeback(folio);
-			netfs_folio_start_fscache(caching, folio);
 			folio_unlock(folio);
 		}
@@ -925,14 +917,14 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
 	struct netfs_inode *ctx = netfs_inode(mapping->host);
 	unsigned long long i_size = i_size_read(&ctx->inode);
 	size_t len, max_len;
-	bool caching = netfs_is_cache_enabled(ctx);
 	long count = wbc->nr_to_write;
 	int ret;
 
-	_enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
+	_enter(",%lx,%llx-%llx", folio->index, start, end);
 
 	wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
-				   NETFS_WRITEBACK);
+				   group == NETFS_FOLIO_COPY_TO_CACHE ?
+				   NETFS_COPY_TO_CACHE : NETFS_WRITEBACK);
 	if (IS_ERR(wreq)) {
 		folio_unlock(folio);
 		return PTR_ERR(wreq);
@@ -941,7 +933,6 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
 	if (!folio_clear_dirty_for_io(folio))
 		BUG();
 	folio_start_writeback(folio);
-	netfs_folio_start_fscache(caching, folio);
 
 	count -= folio_nr_pages(folio);
@@ -950,7 +941,10 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
 	 * immediately lockable, is not dirty or is missing, or we reach the
 	 * end of the range.
 	 */
-	trace_netfs_folio(folio, netfs_folio_trace_store);
+	if (group == NETFS_FOLIO_COPY_TO_CACHE)
+		trace_netfs_folio(folio, netfs_folio_trace_copy);
+	else
+		trace_netfs_folio(folio, netfs_folio_trace_store);
 
 	len = wreq->len;
 	finfo = netfs_folio_info(folio);
@@ -973,7 +967,7 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
 		if (len < max_len)
 			netfs_extend_writeback(mapping, group, xas, &count, start,
-					       max_len, caching, &len, &wreq->upper_len);
+					       max_len, &len, &wreq->upper_len);
 	}
 
 cant_expand:
@@ -997,15 +991,18 @@ cant_expand:
 		iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages,
 				start, wreq->upper_len);
-		__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
-		ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
+		if (group != NETFS_FOLIO_COPY_TO_CACHE) {
+			__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+			ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
+		} else {
+			ret = netfs_begin_write(wreq, true, netfs_write_trace_copy_to_cache);
+		}
 		if (ret == 0 || ret == -EIOCBQUEUED)
 			wbc->nr_to_write -= len / PAGE_SIZE;
 	} else {
 		_debug("write discard %zx @%llx [%llx]", len, start, i_size);
 
 		/* The dirty region was entirely beyond the EOF. */
-		fscache_clear_page_bits(mapping, start, len, caching);
 		netfs_pages_written_back(wreq);
 		ret = 0;
 	}
@@ -1058,9 +1055,11 @@ search_again:
 		/* Skip any dirty folio that's not in the group of interest. */
 		priv = folio_get_private(folio);
-		if ((const struct netfs_group *)priv != group) {
-			finfo = netfs_folio_info(folio);
-			if (finfo->netfs_group != group) {
+		if ((const struct netfs_group *)priv == NETFS_FOLIO_COPY_TO_CACHE) {
+			group = NETFS_FOLIO_COPY_TO_CACHE;
+		} else if ((const struct netfs_group *)priv != group) {
+			finfo = __netfs_folio_info(priv);
+			if (!finfo || finfo->netfs_group != group) {
 				folio_put(folio);
 				continue;
 			}
@@ -1099,14 +1098,10 @@ lock_again:
 			goto search_again;
 		}
 
-		if (folio_test_writeback(folio) ||
-		    folio_test_fscache(folio)) {
+		if (folio_test_writeback(folio)) {
 			folio_unlock(folio);
 			if (wbc->sync_mode != WB_SYNC_NONE) {
 				folio_wait_writeback(folio);
-#ifdef CONFIG_FSCACHE
-				folio_wait_fscache(folio);
-#endif
 				goto lock_again;
 			}
@@ -1265,7 +1260,8 @@ int netfs_launder_folio(struct folio *folio)
 	bvec_set_folio(&bvec, folio, len, offset);
 	iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
-	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+	if (group != NETFS_FOLIO_COPY_TO_CACHE)
+		__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
 	ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
 
 out_put:
@@ -1274,7 +1270,6 @@ out_put:
 	kfree(finfo);
 	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
 out:
-	folio_wait_fscache(folio);
 	_leave(" = %d", ret);
 	return ret;
 }
diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c
index 43a651ed8264..5028f2ae30da 100644
--- a/fs/netfs/fscache_io.c
+++ b/fs/netfs/fscache_io.c
@@ -166,6 +166,7 @@ struct fscache_write_request {
 	loff_t start;
 	size_t len;
 	bool set_bits;
+	bool using_pgpriv2;
 	netfs_io_terminated_t term_func;
 	void *term_func_priv;
 };
@@ -197,8 +198,9 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
 {
 	struct fscache_write_request *wreq = priv;
 
-	fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
-				wreq->set_bits);
+	if (wreq->using_pgpriv2)
+		fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
+					wreq->set_bits);
 
 	if (wreq->term_func)
 		wreq->term_func(wreq->term_func_priv, transferred_or_error,
@@ -212,7 +214,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
 			      loff_t start, size_t len, loff_t i_size,
 			      netfs_io_terminated_t term_func,
 			      void *term_func_priv,
-			      bool cond)
+			      bool using_pgpriv2, bool cond)
 {
 	struct fscache_write_request *wreq;
 	struct netfs_cache_resources *cres;
@@ -230,6 +232,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
 	wreq->mapping = mapping;
 	wreq->start = start;
 	wreq->len = len;
+	wreq->using_pgpriv2 = using_pgpriv2;
 	wreq->set_bits = cond;
 	wreq->term_func = term_func;
 	wreq->term_func_priv = term_func_priv;
@@ -257,7 +260,8 @@ abandon_end:
 abandon_free:
 	kfree(wreq);
 abandon:
-	fscache_clear_page_bits(mapping, start, len, cond);
+	if (using_pgpriv2)
+		fscache_clear_page_bits(mapping, start, len, cond);
 	if (term_func)
 		term_func(term_func_priv, ret, false);
 }
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index ec7045d24400..156ab138e224 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -168,7 +168,7 @@ static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
  */
 static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
 {
-	if (netfs_group)
+	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
 		refcount_inc(&netfs_group->ref);
 	return netfs_group;
 }
@@ -178,7 +178,9 @@ static inline struct netfs_group *netfs_get_group(struct netfs_grou
  */
 static inline void netfs_put_group(struct netfs_group *netfs_group)
 {
-	if (netfs_group && refcount_dec_and_test(&netfs_group->ref))
+	if (netfs_group &&
+	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+	    refcount_dec_and_test(&netfs_group->ref))
 		netfs_group->free(netfs_group);
 }
@@ -187,7 +189,9 @@ static inline void netfs_put_group(struct netfs_group *netfs_group)
  */
 static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
 {
-	if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref))
+	if (netfs_group &&
+	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+	    refcount_sub_and_test(nr, &netfs_group->ref))
 		netfs_group->free(netfs_group);
 }
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 4261ad6c55b6..b3b9827a9709 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -99,8 +99,9 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
 }
 
 /*
- * Deal with the completion of writing the data to the cache.  We have to clear
- * the PG_fscache bits on the folios involved and release the caller's ref.
+ * [DEPRECATED] Deal with the completion of writing the data to the cache.  We
+ * have to clear the PG_fscache bits on the folios involved and release the
+ * caller's ref.
  *
  * May be called in softirq mode and we inherit a ref from the caller.
  */
@@ -138,7 +139,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
 }
 
 static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
-				       bool was_async)
+				       bool was_async) /* [DEPRECATED] */
 {
 	struct netfs_io_subrequest *subreq = priv;
 	struct netfs_io_request *rreq = subreq->rreq;
@@ -161,8 +162,8 @@ static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
 }
 
 /*
- * Perform any outstanding writes to the cache.  We inherit a ref from the
- * caller.
+ * [DEPRECATED] Perform any outstanding writes to the cache.  We inherit a ref
+ * from the caller.
  */
 static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
 {
@@ -222,7 +223,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
 	netfs_rreq_unmark_after_write(rreq, false);
 }
 
-static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
 {
 	struct netfs_io_request *rreq =
 		container_of(work, struct netfs_io_request, work);
@@ -230,7 +231,7 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work)
 	netfs_rreq_do_write_to_cache(rreq);
 }
 
-static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
 {
 	rreq->work.func = netfs_rreq_write_to_cache_work;
 	if (!queue_work(system_unbound_wq, &rreq->work))
@@ -409,7 +410,8 @@ again:
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
-	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
+	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
+	    test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
 		return netfs_rreq_write_to_cache(rreq);
 
 	netfs_rreq_completed(rreq, was_async);
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 5e77618a7940..c5a73c9ed126 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -31,6 +31,7 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
 	[NETFS_READAHEAD]		= "RA",
 	[NETFS_READPAGE]		= "RP",
 	[NETFS_READ_FOR_WRITE]		= "RW",
+	[NETFS_COPY_TO_CACHE]		= "CC",
 	[NETFS_WRITEBACK]		= "WB",
 	[NETFS_WRITETHROUGH]		= "WT",
 	[NETFS_LAUNDER_WRITE]		= "LW",
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 90051ced8e2a..bc1fc54fb724 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -177,13 +177,11 @@ EXPORT_SYMBOL(netfs_clear_inode_writeback);
  */
 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
-	struct netfs_folio *finfo = NULL;
+	struct netfs_folio *finfo;
 	size_t flen = folio_size(folio);
 
 	_enter("{%lx},%zx,%zx", folio->index, offset, length);
 
-	folio_wait_fscache(folio);
-
 	if (!folio_test_private(folio))
 		return;
@@ -248,12 +246,6 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
 	if (folio_test_private(folio))
 		return false;
 
-	if (folio_test_fscache(folio)) {
-		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return false;
-		folio_wait_fscache(folio);
-	}
-
 	fscache_note_page_release(netfs_i_cookie(ctx));
 	return true;
 }
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 610ceb5bd86c..72b52f070270 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -45,8 +45,12 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	refcount_set(&rreq->ref, 1);
 	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-	if (cached)
+	if (cached) {
 		__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
+		if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags))
+			/* Filesystem uses deprecated PG_private_2 marking. */
+			__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+	}
 	if (file && file->f_flags & O_NONBLOCK)
 		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
 	if (rreq->netfs_ops->init_request) {