author     Linus Torvalds <torvalds@linux-foundation.org>	2024-09-16 12:13:31 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>	2024-09-16 12:13:31 +0200
commit     35219bc5c71f4197c8bd10297597de797c1eece5 (patch)
tree       2448156135b78f54cd341a8457ccd84a371ddac7 /fs/smb
parent     9020d0d844ad58a051f90b1e5b82ba34123925b9 (diff)
parent     4b40d43d9f951d87ae8dc414c2ef5ae50303a266 (diff)
Merge tag 'vfs-6.12.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
Pull netfs updates from Christian Brauner:
"This contains the work to improve read/write performance for the new
netfs library.
The main performance enhancing changes are:
- Define a structure, struct folio_queue, and a new iterator type,
ITER_FOLIOQ, to hold a buffer as a replacement for ITER_XARRAY. See
that patch for questions about naming and form.
ITER_FOLIOQ is provided as a replacement for ITER_XARRAY. The
problem with an xarray is that accessing it requires the use of a
lock (typically the RCU read lock) - and this means that we can't
supply iterate_and_advance() with a step function that might sleep
(crypto for example) without having to drop the lock between pages.
ITER_FOLIOQ is the iterator for a chain of folio_queue structs,
where each folio_queue holds a small list of folios. A folio_queue
struct is a simpler structure than xarray and is not subject to
concurrent manipulation by the VM. folio_queue is used rather than
a bvec[] as it can form lists of indefinite size, adding to one end
and removing from the other on the fly (a short API sketch follows
the merge message below).
- Provide a copy_folio_from_iter() wrapper.
- Make cifs RDMA support ITER_FOLIOQ.
- Use folio queues in the write-side helpers instead of xarrays.
- Add a function to reset the iterator in a subrequest.
- Simplify the write-side helpers to use sheaves to skip gaps rather
than trying to work out where gaps are.
- In afs, make the read subrequests asynchronous, putting them into
work items to allow the next patch to do progressive
unlocking/reading.
- Overhaul the read-side helpers to improve performance.
- Fix the caching of a partial block at the end of a file.
- Allow a store to be cancelled.
Then some changes for cifs to make it use folio queues instead of
xarrays for crypto bufferage:
- Use raw iteration functions rather than manually coding iteration
when hashing data (see the step-function sketch after the commit
list below).
- Switch to using folio_queue for crypto buffers.
- Remove the xarray bits.
Make some adjustments to the /proc/fs/netfs/stats file such that:
- All the netfs stats lines currently begin 'Netfs:'; change this
prefix to something a bit more useful.
- Add a couple of stats counters to track the numbers of skips and
waits on the per-inode writeback serialisation lock to make it
easier to check for this as a source of performance loss.
Miscellaneous work:
- Ensure that the sb_writers lock is taken around
vfs_{set,remove}xattr() in the cachefiles code.
- Reduce the number of conditional branches in netfs_perform_write().
- Move the CIFS_INO_MODIFIED_ATTR flag to the netfs_inode struct and
remove cifs_post_modify().
- Move the max_len/max_nr_segs members from netfs_io_subrequest to
netfs_io_request as they're only needed for one subreq at a time.
- Add an 'unknown' source value for tracing purposes.
- Remove NETFS_COPY_TO_CACHE as it's no longer used.
- Set the request work function up front at allocation time.
- Use bh-disabling spinlocks for rreq->lock as cachefiles completion
may be run from block-filesystem DIO completion in softirq context
(see the locking sketch after the diffstat).
- Remove fs/netfs/io.c"
* tag 'vfs-6.12.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (25 commits)
docs: filesystems: corrected grammar of netfs page
cifs: Don't support ITER_XARRAY
cifs: Switch crypto buffer to use a folio_queue rather than an xarray
cifs: Use iterate_and_advance*() routines directly for hashing
netfs: Cancel dirty folios that have no storage destination
cachefiles, netfs: Fix write to partial block at EOF
netfs: Remove fs/netfs/io.c
netfs: Speed up buffered reading
afs: Make read subreqs async
netfs: Simplify the writeback code
netfs: Provide an iterator-reset function
netfs: Use new folio_queue data type and iterator instead of xarray iter
cifs: Provide the capability to extract from ITER_FOLIOQ to RDMA SGEs
iov_iter: Provide copy_folio_from_iter()
mm: Define struct folio_queue and ITER_FOLIOQ to handle a sequence of folios
netfs: Use bh-disabling spinlocks for rreq->lock
netfs: Set the request work function upon allocation
netfs: Remove NETFS_COPY_TO_CACHE
netfs: Reserve netfs_sreq_source 0 as unset/unknown
netfs: Move max_len/max_nr_segs from netfs_io_subrequest to netfs_io_stream
...
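The hashing conversion listed above replaces three per-iterator-type loops
with one step function driven by iterate_and_advance_kernel(). Below is a
condensed sketch of the same pattern with a hypothetical byte-sum in place of
crypto_shash_update(); the real version is cifs_shash_step()/cifs_shash_iter()
in the cifsencrypt.c hunk further down.

	#include <linux/iov_iter.h>
	#include <linux/types.h>

	/* Step function: called once per contiguous segment, with the segment
	 * already mapped at iter_base.  The return value is the number of
	 * bytes NOT consumed, so returning len aborts the walk early and
	 * returning 0 consumes the whole segment.
	 */
	static size_t sketch_sum_step(void *iter_base, size_t progress, size_t len,
				      void *priv, void *priv2)
	{
		u32 *sum = priv;
		const u8 *p = iter_base;

		while (len--)
			*sum += *p++;
		return 0;
	}

	static int sketch_sum_iter(const struct iov_iter *iter, size_t maxsize,
				   u32 *sum)
	{
		struct iov_iter tmp_iter = *iter; /* advance a copy, not the caller's iter */
		int err = -EIO;	/* priv2 gives a step function somewhere to report errors */

		if (iterate_and_advance_kernel(&tmp_iter, maxsize, sum, &err,
					       sketch_sum_step) != maxsize)
			return err;
		return 0;
	}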
Diffstat (limited to 'fs/smb')
-rw-r--r--	fs/smb/client/cifsencrypt.c	144
-rw-r--r--	fs/smb/client/cifsglob.h	4
-rw-r--r--	fs/smb/client/cifssmb.c	11
-rw-r--r--	fs/smb/client/file.c	96
-rw-r--r--	fs/smb/client/smb2ops.c	219
-rw-r--r--	fs/smb/client/smb2pdu.c	27
-rw-r--r--	fs/smb/client/smbdirect.c	82
7 files changed, 239 insertions, 344 deletions
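One easy-to-miss item from the miscellaneous list above is the rreq->lock
change. Because cachefiles completion can now run in softirq context off a
block-filesystem DIO completion, process-context takers of the same lock must
disable bottom halves. A minimal sketch of the pattern, with illustrative
field names:

	#include <linux/spinlock.h>

	struct sketch_request {
		spinlock_t lock;		/* stands in for rreq->lock */
		unsigned int nr_outstanding;
	};

	static void sketch_subreq_done(struct sketch_request *rreq)
	{
		/* A plain spin_lock() here could deadlock if a softirq
		 * completion fired on this CPU while the lock was held;
		 * the _bh variants disable bottom halves for the critical
		 * section.
		 */
		spin_lock_bh(&rreq->lock);
		rreq->nr_outstanding--;
		spin_unlock_bh(&rreq->lock);
	}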
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index b0473c2567fe..7481b21a0489 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -21,127 +21,21 @@
 #include <linux/random.h>
 #include <linux/highmem.h>
 #include <linux/fips.h>
+#include <linux/iov_iter.h>
 #include "../common/arc4.h"
 #include <crypto/aead.h>
 
-/*
- * Hash data from a BVEC-type iterator.
- */
-static int cifs_shash_bvec(const struct iov_iter *iter, ssize_t maxsize,
-			   struct shash_desc *shash)
+static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
+			      void *priv, void *priv2)
 {
-	const struct bio_vec *bv = iter->bvec;
-	unsigned long start = iter->iov_offset;
-	unsigned int i;
-	void *p;
-	int ret;
-
-	for (i = 0; i < iter->nr_segs; i++) {
-		size_t off, len;
-
-		len = bv[i].bv_len;
-		if (start >= len) {
-			start -= len;
-			continue;
-		}
-
-		len = min_t(size_t, maxsize, len - start);
-		off = bv[i].bv_offset + start;
+	struct shash_desc *shash = priv;
+	int ret, *pret = priv2;
 
-		p = kmap_local_page(bv[i].bv_page);
-		ret = crypto_shash_update(shash, p + off, len);
-		kunmap_local(p);
-		if (ret < 0)
-			return ret;
-
-		maxsize -= len;
-		if (maxsize <= 0)
-			break;
-		start = 0;
+	ret = crypto_shash_update(shash, iter_base, len);
+	if (ret < 0) {
+		*pret = ret;
+		return len;
 	}
-
-	return 0;
-}
-
-/*
- * Hash data from a KVEC-type iterator.
- */
-static int cifs_shash_kvec(const struct iov_iter *iter, ssize_t maxsize,
-			   struct shash_desc *shash)
-{
-	const struct kvec *kv = iter->kvec;
-	unsigned long start = iter->iov_offset;
-	unsigned int i;
-	int ret;
-
-	for (i = 0; i < iter->nr_segs; i++) {
-		size_t len;
-
-		len = kv[i].iov_len;
-		if (start >= len) {
-			start -= len;
-			continue;
-		}
-
-		len = min_t(size_t, maxsize, len - start);
-		ret = crypto_shash_update(shash, kv[i].iov_base + start, len);
-		if (ret < 0)
-			return ret;
-		maxsize -= len;
-
-		if (maxsize <= 0)
-			break;
-		start = 0;
-	}
-
-	return 0;
-}
-
-/*
- * Hash data from an XARRAY-type iterator.
- */
-static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
-				 struct shash_desc *shash)
-{
-	struct folio *folios[16], *folio;
-	unsigned int nr, i, j, npages;
-	loff_t start = iter->xarray_start + iter->iov_offset;
-	pgoff_t last, index = start / PAGE_SIZE;
-	ssize_t ret = 0;
-	size_t len, offset, foffset;
-	void *p;
-
-	if (maxsize == 0)
-		return 0;
-
-	last = (start + maxsize - 1) / PAGE_SIZE;
-	do {
-		nr = xa_extract(iter->xarray, (void **)folios, index, last,
-				ARRAY_SIZE(folios), XA_PRESENT);
-		if (nr == 0)
-			return -EIO;
-
-		for (i = 0; i < nr; i++) {
-			folio = folios[i];
-			npages = folio_nr_pages(folio);
-			foffset = start - folio_pos(folio);
-			offset = foffset % PAGE_SIZE;
-			for (j = foffset / PAGE_SIZE; j < npages; j++) {
-				len = min_t(size_t, maxsize, PAGE_SIZE - offset);
-				p = kmap_local_page(folio_page(folio, j));
-				ret = crypto_shash_update(shash, p + offset, len);
-				kunmap_local(p);
-				if (ret < 0)
-					return ret;
-				maxsize -= len;
-				if (maxsize <= 0)
-					return 0;
-				start += len;
-				offset = 0;
-				index++;
-			}
-		}
-	} while (nr == ARRAY_SIZE(folios));
 	return 0;
 }
 
@@ -151,21 +45,13 @@ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
 static int cifs_shash_iter(const struct iov_iter *iter, size_t maxsize,
			   struct shash_desc *shash)
 {
-	if (maxsize == 0)
-		return 0;
+	struct iov_iter tmp_iter = *iter;
+	int err = -EIO;
 
-	switch (iov_iter_type(iter)) {
-	case ITER_BVEC:
-		return cifs_shash_bvec(iter, maxsize, shash);
-	case ITER_KVEC:
-		return cifs_shash_kvec(iter, maxsize, shash);
-	case ITER_XARRAY:
-		return cifs_shash_xarray(iter, maxsize, shash);
-	default:
-		pr_err("cifs_shash_iter(%u) unsupported\n", iov_iter_type(iter));
-		WARN_ON_ONCE(1);
-		return -EIO;
-	}
+	if (iterate_and_advance_kernel(&tmp_iter, maxsize, shash, &err,
+				       cifs_shash_step) != maxsize)
+		return err;
+	return 0;
 }
 
 int __cifs_calc_signature(struct smb_rqst *rqst,
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 9eae8649f90c..2709c7b0be85 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -255,7 +255,7 @@ struct smb_rqst {
 	struct kvec	*rq_iov;	/* array of kvecs */
 	unsigned int	rq_nvec;	/* number of kvecs in array */
 	struct iov_iter	rq_iter;	/* Data iterator */
-	struct xarray	rq_buffer;	/* Page buffer for encryption */
+	struct folio_queue *rq_buffer;	/* Buffer for encryption */
 };
 
 struct mid_q_entry;
@@ -1485,7 +1485,6 @@ struct cifs_io_subrequest {
 		struct cifs_io_request *req;
 	};
 	ssize_t				got_bytes;
-	size_t				actual_len;
 	unsigned int			xid;
 	int				result;
 	bool				have_xid;
@@ -1550,7 +1549,6 @@ struct cifsInodeInfo {
 #define CIFS_INO_DELETE_PENDING		  (3) /* delete pending on server */
 #define CIFS_INO_INVALID_MAPPING	  (4) /* pagecache is invalid */
 #define CIFS_INO_LOCK			  (5) /* lock bit for synchronization */
-#define CIFS_INO_MODIFIED_ATTR		  (6) /* Indicate change in mtime/ctime */
 #define CIFS_INO_CLOSE_ON_LOCK		  (7) /* Not to defer the close when lock is set */
 	unsigned long flags;
 	spinlock_t writers_lock;
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index cfae2e918209..d81da161d3ed 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1266,9 +1266,7 @@ static void cifs_readv_worker(struct work_struct *work)
 	struct cifs_io_subrequest *rdata =
 		container_of(work, struct cifs_io_subrequest, subreq.work);
 
-	netfs_subreq_terminated(&rdata->subreq,
-				(rdata->result == 0 || rdata->result == -EAGAIN) ?
-				rdata->got_bytes : rdata->result, true);
+	netfs_read_subreq_terminated(&rdata->subreq, rdata->result, true);
 }
 
 static void
@@ -1327,15 +1325,16 @@ cifs_readv_callback(struct mid_q_entry *mid)
 			__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
 			rdata->result = 0;
 		} else {
-			if (rdata->got_bytes < rdata->actual_len &&
-			    rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes ==
-			    ictx->remote_i_size) {
+			size_t trans = rdata->subreq.transferred + rdata->got_bytes;
+			if (trans < rdata->subreq.len &&
+			    rdata->subreq.start + trans == ictx->remote_i_size) {
 				__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
 				rdata->result = 0;
 			}
 		}
 
 	rdata->credits.value = 0;
+	rdata->subreq.transferred += rdata->got_bytes;
 	INIT_WORK(&rdata->subreq.work, cifs_readv_worker);
 	queue_work(cifsiod_wq, &rdata->subreq.work);
 	release_mid(mid);
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 2d387485f05b..bcde3f9c89e0 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -49,6 +49,7 @@ static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
 	struct cifs_io_subrequest *wdata =
 		container_of(subreq, struct cifs_io_subrequest, subreq);
 	struct cifs_io_request *req = wdata->req;
+	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
 	struct TCP_Server_Info *server;
 	struct cifsFileInfo *open_file = req->cfile;
 	size_t wsize = req->rreq.wsize;
@@ -73,7 +74,7 @@ retry:
 		}
 	}
 
-	rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
+	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
 					   &wdata->credits);
 	if (rc < 0) {
 		subreq->error = rc;
@@ -92,7 +93,7 @@ retry:
 
 #ifdef CONFIG_CIFS_SMB_DIRECT
 	if (server->smbd_conn)
-		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
 #endif
 }
 
@@ -111,7 +112,6 @@ static void cifs_issue_write(struct netfs_io_subrequest *subreq)
 			goto fail;
 	}
 
-	wdata->actual_len = wdata->subreq.len;
 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
 	if (rc)
 		goto fail;
@@ -140,25 +140,22 @@ static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
 }
 
 /*
- * Split the read up according to how many credits we can get for each piece.
- * It's okay to sleep here if we need to wait for more credit to become
- * available.
- *
- * We also choose the server and allocate an operation ID to be cleaned up
- * later.
+ * Negotiate the size of a read operation on behalf of the netfs library.
  */
-static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
+static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
 	struct TCP_Server_Info *server = req->server;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
-	size_t rsize;
-	int rc;
+	size_t size;
+	int rc = 0;
 
-	rdata->xid = get_xid();
-	rdata->have_xid = true;
+	if (!rdata->have_xid) {
+		rdata->xid = get_xid();
+		rdata->have_xid = true;
+	}
 	rdata->server = server;
 
 	if (cifs_sb->ctx->rsize == 0)
@@ -166,13 +163,12 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
 			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
 						     cifs_sb->ctx);
 
-	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
-					   &rsize, &rdata->credits);
-	if (rc) {
-		subreq->error = rc;
-		return false;
-	}
+	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
+					   &size, &rdata->credits);
+	if (rc)
+		return rc;
+
+	rreq->io_streams[0].sreq_max_len = size;
 
 	rdata->credits.in_flight_check = 1;
 	rdata->credits.rreq_debug_id = rreq->debug_id;
@@ -184,14 +180,11 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
 			      server->credits, server->in_flight, 0,
 			      cifs_trace_rw_credits_read_submit);
 
-	subreq->len = umin(subreq->len, rsize);
-	rdata->actual_len = subreq->len;
-
 #ifdef CONFIG_CIFS_SMB_DIRECT
 	if (server->smbd_conn)
-		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
 #endif
-	return true;
+	return 0;
 }
 
 /*
@@ -200,59 +193,41 @@
  * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
-static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
+static void cifs_issue_read(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
 	struct TCP_Server_Info *server = req->server;
-	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
 	int rc = 0;
 
 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
 		 subreq->transferred, subreq->len);
 
-	if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
-		/*
-		 * As we're issuing a retry, we need to negotiate some new
-		 * credits otherwise the server may reject the op with
-		 * INVALID_PARAMETER.  Note, however, we may get back less
-		 * credit than we need to complete the op, in which case, we
-		 * shorten the op and rely on additional rounds of retry.
-		 */
-		size_t rsize = umin(subreq->len - subreq->transferred,
-				    cifs_sb->ctx->rsize);
-
-		rc = server->ops->wait_mtu_credits(server, rsize, &rdata->actual_len,
-						   &rdata->credits);
-		if (rc)
-			goto out;
-
-		rdata->credits.in_flight_check = 1;
-
-		trace_smb3_rw_credits(rdata->rreq->debug_id,
-				      rdata->subreq.debug_index,
-				      rdata->credits.value,
-				      server->credits, server->in_flight, 0,
-				      cifs_trace_rw_credits_read_resubmit);
-	}
+	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
+	if (rc)
+		goto failed;
 
 	if (req->cfile->invalidHandle) {
 		do {
 			rc = cifs_reopen_file(req->cfile, true);
 		} while (rc == -EAGAIN);
 		if (rc)
-			goto out;
+			goto failed;
 	}
 
 	if (subreq->rreq->origin != NETFS_DIO_READ)
 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 
+	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 	rc = rdata->server->ops->async_readv(rdata);
-out:
 	if (rc)
-		netfs_subreq_terminated(subreq, rc, false);
+		goto failed;
+	return;
+
+failed:
+	netfs_read_subreq_terminated(subreq, rc, false);
 }
 
 /*
@@ -316,12 +291,6 @@ static void cifs_rreq_done(struct netfs_io_request *rreq)
 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
 }
 
-static void cifs_post_modify(struct inode *inode)
-{
-	/* Indication to update ctime and mtime as close is deferred */
-	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
-}
-
 static void cifs_free_request(struct netfs_io_request *rreq)
 {
 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
@@ -369,10 +338,9 @@ const struct netfs_request_ops cifs_req_ops = {
 	.init_request		= cifs_init_request,
 	.free_request		= cifs_free_request,
 	.free_subrequest	= cifs_free_subrequest,
-	.clamp_length		= cifs_clamp_length,
-	.issue_read		= cifs_req_issue_read,
+	.prepare_read		= cifs_prepare_read,
+	.issue_read		= cifs_issue_read,
 	.done			= cifs_rreq_done,
-	.post_modify		= cifs_post_modify,
 	.begin_writeback	= cifs_begin_writeback,
 	.prepare_write		= cifs_prepare_write,
 	.issue_write		= cifs_issue_write,
@@ -1396,7 +1364,7 @@ int cifs_close(struct inode *inode, struct file *file)
 			dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
 			if ((cfile->status_file_deleted == false) &&
 			    (smb2_can_defer_close(inode, dclose))) {
-				if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+				if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
 					inode_set_mtime_to_ts(inode,
 							      inode_set_ctime_current(inode));
 				}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index e6540072ffb0..159a063de6dd 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -13,6 +13,7 @@
 #include <linux/sort.h>
 #include <crypto/aead.h>
 #include <linux/fiemap.h>
+#include <linux/folio_queue.h>
 #include <uapi/linux/magic.h>
 #include "cifsfs.h"
 #include "cifsglob.h"
@@ -301,7 +302,8 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
 		    unsigned int /*enum smb3_rw_credits_trace*/ trace)
 {
 	struct cifs_credits *credits = &subreq->credits;
-	int new_val = DIV_ROUND_UP(subreq->actual_len, SMB2_MAX_BUFFER_SIZE);
+	int new_val = DIV_ROUND_UP(subreq->subreq.len - subreq->subreq.transferred,
+				   SMB2_MAX_BUFFER_SIZE);
 	int scredits, in_flight;
 
 	if (!credits->value || credits->value == new_val)
@@ -4392,30 +4394,86 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
 }
 
 /*
- * Clear a read buffer, discarding the folios which have XA_MARK_0 set.
+ * Clear a read buffer, discarding the folios which have the 1st mark set.
  */
-static void cifs_clear_xarray_buffer(struct xarray *buffer)
+static void cifs_clear_folioq_buffer(struct folio_queue *buffer)
 {
+	struct folio_queue *folioq;
+
+	while ((folioq = buffer)) {
+		for (int s = 0; s < folioq_count(folioq); s++)
+			if (folioq_is_marked(folioq, s))
+				folio_put(folioq_folio(folioq, s));
+		buffer = folioq->next;
+		kfree(folioq);
+	}
+}
+
+/*
+ * Allocate buffer space into a folio queue.
+ */
+static struct folio_queue *cifs_alloc_folioq_buffer(ssize_t size)
+{
+	struct folio_queue *buffer = NULL, *tail = NULL, *p;
 	struct folio *folio;
+	unsigned int slot;
+
+	do {
+		if (!tail || folioq_full(tail)) {
+			p = kmalloc(sizeof(*p), GFP_NOFS);
+			if (!p)
+				goto nomem;
+			folioq_init(p);
+			if (tail) {
+				tail->next = p;
+				p->prev = tail;
+			} else {
+				buffer = p;
+			}
+			tail = p;
+		}
+
+		folio = folio_alloc(GFP_KERNEL|__GFP_HIGHMEM, 0);
+		if (!folio)
+			goto nomem;
+
+		slot = folioq_append_mark(tail, folio);
+		size -= folioq_folio_size(tail, slot);
+	} while (size > 0);
+
+	return buffer;
+
+nomem:
+	cifs_clear_folioq_buffer(buffer);
+	return NULL;
+}
+
+/*
+ * Copy data from an iterator to the folios in a folio queue buffer.
+ */
+static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size,
+				     struct folio_queue *buffer)
+{
+	for (; buffer; buffer = buffer->next) {
+		for (int s = 0; s < folioq_count(buffer); s++) {
+			struct folio *folio = folioq_folio(buffer, s);
+			size_t part = folioq_folio_size(buffer, s);
 
-	XA_STATE(xas, buffer, 0);
+			part = umin(part, size);
 
-	rcu_read_lock();
-	xas_for_each_marked(&xas, folio, ULONG_MAX, XA_MARK_0) {
-		folio_put(folio);
+			if (copy_folio_from_iter(folio, 0, part, iter) != part)
+				return false;
+			size -= part;
+		}
 	}
-	rcu_read_unlock();
-	xa_destroy(buffer);
+	return true;
 }
 
 void
 smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
 {
-	int i;
-
-	for (i = 0; i < num_rqst; i++)
-		if (!xa_empty(&rqst[i].rq_buffer))
-			cifs_clear_xarray_buffer(&rqst[i].rq_buffer);
+	for (int i = 0; i < num_rqst; i++)
+		cifs_clear_folioq_buffer(rqst[i].rq_buffer);
 }
 
 /*
@@ -4436,52 +4494,32 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
 		       struct smb_rqst *new_rq, struct smb_rqst *old_rq)
 {
 	struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
-	struct page *page;
 	unsigned int orig_len = 0;
-	int i, j;
 	int rc = -ENOMEM;
 
-	for (i = 1; i < num_rqst; i++) {
+	for (int i = 1; i < num_rqst; i++) {
 		struct smb_rqst *old = &old_rq[i - 1];
 		struct smb_rqst *new = &new_rq[i];
-		struct xarray *buffer = &new->rq_buffer;
-		size_t size = iov_iter_count(&old->rq_iter), seg, copied = 0;
+		struct folio_queue *buffer;
+		size_t size = iov_iter_count(&old->rq_iter);
 
 		orig_len += smb_rqst_len(server, old);
 		new->rq_iov = old->rq_iov;
 		new->rq_nvec = old->rq_nvec;
 
-		xa_init(buffer);
-
 		if (size > 0) {
-			unsigned int npages = DIV_ROUND_UP(size, PAGE_SIZE);
-
-			for (j = 0; j < npages; j++) {
-				void *o;
-
-				rc = -ENOMEM;
-				page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
-				if (!page)
-					goto err_free;
-				page->index = j;
-				o = xa_store(buffer, j, page, GFP_KERNEL);
-				if (xa_is_err(o)) {
-					rc = xa_err(o);
-					put_page(page);
-					goto err_free;
-				}
+			buffer = cifs_alloc_folioq_buffer(size);
+			if (!buffer)
+				goto err_free;
 
-				xa_set_mark(buffer, j, XA_MARK_0);
+			new->rq_buffer = buffer;
+			iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE,
+					     buffer, 0, 0, size);
 
-				seg = min_t(size_t, size - copied, PAGE_SIZE);
-				if (copy_page_from_iter(page, 0, seg, &old->rq_iter) != seg) {
-					rc = -EFAULT;
-					goto err_free;
-				}
-				copied += seg;
+			if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
+				rc = -EIO;
+				goto err_free;
 			}
-
-			iov_iter_xarray(&new->rq_iter, ITER_SOURCE,
-					buffer, 0, size);
 		}
 	}
@@ -4545,22 +4583,23 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
 }
 
 static int
-cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size,
-			unsigned int skip, struct iov_iter *iter)
+cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size,
+			 size_t skip, struct iov_iter *iter)
 {
-	struct page *page;
-	unsigned long index;
-
-	xa_for_each(pages, index, page) {
-		size_t n, len = min_t(unsigned int, PAGE_SIZE - skip, data_size);
-
-		n = copy_page_to_iter(page, skip, len, iter);
-		if (n != len) {
-			cifs_dbg(VFS, "%s: something went wrong\n", __func__);
-			return -EIO;
+	for (; folioq; folioq = folioq->next) {
+		for (int s = 0; s < folioq_count(folioq); s++) {
+			struct folio *folio = folioq_folio(folioq, s);
+			size_t fsize = folio_size(folio);
+			size_t n, len = umin(fsize - skip, data_size);
+
+			n = copy_folio_to_iter(folio, skip, len, iter);
+			if (n != len) {
+				cifs_dbg(VFS, "%s: something went wrong\n", __func__);
+				return -EIO;
+			}
+			data_size -= n;
+			skip = 0;
 		}
-		data_size -= n;
-		skip = 0;
 	}
 
 	return 0;
@@ -4568,8 +4607,8 @@ cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size,
 
 static int
 handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
-		 char *buf, unsigned int buf_len, struct xarray *pages,
-		 unsigned int pages_len, bool is_offloaded)
+		 char *buf, unsigned int buf_len, struct folio_queue *buffer,
+		 unsigned int buffer_len, bool is_offloaded)
 {
 	unsigned int data_offset;
 	unsigned int data_len;
@@ -4666,7 +4705,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 			return 0;
 		}
 
-		if (data_len > pages_len - pad_len) {
+		if (data_len > buffer_len - pad_len) {
 			/* data_len is corrupt -- discard frame */
 			rdata->result = -EIO;
 			if (is_offloaded)
@@ -4677,8 +4716,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 		}
 
 		/* Copy the data to the output I/O iterator. */
-		rdata->result = cifs_copy_pages_to_iter(pages, pages_len,
-							cur_off, &rdata->subreq.io_iter);
+		rdata->result = cifs_copy_folioq_to_iter(buffer, buffer_len,
+							 cur_off, &rdata->subreq.io_iter);
 		if (rdata->result != 0) {
 			if (is_offloaded)
 				mid->mid_state = MID_RESPONSE_MALFORMED;
@@ -4686,12 +4725,11 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 			else
 				dequeue_mid(mid, rdata->result);
 			return 0;
 		}
-		rdata->got_bytes = pages_len;
+		rdata->got_bytes = buffer_len;
 
 	} else if (buf_len >= data_offset + data_len) {
 		/* read response payload is in buf */
-		WARN_ONCE(pages && !xa_empty(pages),
-			  "read data can be either in buf or in pages");
+		WARN_ONCE(buffer, "read data can be either in buf or in buffer");
 		length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
 		if (length < 0)
 			return length;
@@ -4717,7 +4755,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 struct smb2_decrypt_work {
 	struct work_struct decrypt;
 	struct TCP_Server_Info *server;
-	struct xarray buffer;
+	struct folio_queue *buffer;
 	char *buf;
 	unsigned int len;
 };
@@ -4731,7 +4769,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
 	struct mid_q_entry *mid;
 	struct iov_iter iter;
 
-	iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, dw->len);
+	iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
 	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
 			      &iter, true);
 	if (rc) {
@@ -4747,7 +4785,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
 		mid->decrypted = true;
 		rc = handle_read_data(dw->server, mid, dw->buf,
 				      dw->server->vals->read_rsp_size,
-				      &dw->buffer, dw->len,
+				      dw->buffer, dw->len,
 				      true);
 		if (rc >= 0) {
 #ifdef CONFIG_CIFS_STATS2
@@ -4780,7 +4818,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
 	}
 
 free_pages:
-	cifs_clear_xarray_buffer(&dw->buffer);
+	cifs_clear_folioq_buffer(dw->buffer);
 	cifs_small_buf_release(dw->buf);
 	kfree(dw);
 }
@@ -4790,20 +4828,17 @@
 static int
 receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 		       int *num_mids)
 {
-	struct page *page;
 	char *buf = server->smallbuf;
 	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
 	struct iov_iter iter;
-	unsigned int len, npages;
+	unsigned int len;
 	unsigned int buflen = server->pdu_size;
 	int rc;
-	int i = 0;
 	struct smb2_decrypt_work *dw;
 
 	dw = kzalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
 	if (!dw)
 		return -ENOMEM;
-	xa_init(&dw->buffer);
 	INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
 	dw->server = server;
 
@@ -4819,26 +4854,14 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
 		server->vals->read_rsp_size;
 	dw->len = len;
-	npages = DIV_ROUND_UP(len, PAGE_SIZE);
+	len = round_up(dw->len, PAGE_SIZE);
 
 	rc = -ENOMEM;
-	for (; i < npages; i++) {
-		void *old;
-
-		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
-		if (!page)
-			goto discard_data;
-		page->index = i;
-		old = xa_store(&dw->buffer, i, page, GFP_KERNEL);
-		if (xa_is_err(old)) {
-			rc = xa_err(old);
-			put_page(page);
-			goto discard_data;
-		}
-		xa_set_mark(&dw->buffer, i, XA_MARK_0);
-	}
+	dw->buffer = cifs_alloc_folioq_buffer(len);
+	if (!dw->buffer)
+		goto discard_data;
 
-	iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, npages * PAGE_SIZE);
+	iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
 
 	/* Read the data into the buffer and clear excess bufferage. */
 	rc = cifs_read_iter_from_socket(server, &iter, dw->len);
@@ -4846,9 +4869,9 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 	if (rc < 0)
 		goto discard_data;
 
 	server->total_read += rc;
-	if (rc < npages * PAGE_SIZE)
-		iov_iter_zero(npages * PAGE_SIZE - rc, &iter);
-	iov_iter_revert(&iter, npages * PAGE_SIZE);
+	if (rc < len)
+		iov_iter_zero(len - rc, &iter);
+	iov_iter_revert(&iter, len);
 	iov_iter_truncate(&iter, dw->len);
 
 	rc = cifs_discard_remaining_data(server);
@@ -4883,7 +4906,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 	(*mid)->decrypted = true;
 	rc = handle_read_data(server, *mid, buf,
 			      server->vals->read_rsp_size,
-			      &dw->buffer, dw->len, false);
+			      dw->buffer, dw->len, false);
 	if (rc >= 0) {
 		if (server->ops->is_network_name_deleted) {
 			server->ops->is_network_name_deleted(buf,
@@ -4893,7 +4916,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 	}
 
 free_pages:
-	cifs_clear_xarray_buffer(&dw->buffer);
+	cifs_clear_folioq_buffer(dw->buffer);
 free_dw:
 	kfree(dw);
 	return rc;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 88dc49d67037..95377bb91950 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4498,9 +4498,7 @@ static void smb2_readv_worker(struct work_struct *work)
 	struct cifs_io_subrequest *rdata =
 		container_of(work, struct cifs_io_subrequest, subreq.work);
 
-	netfs_subreq_terminated(&rdata->subreq,
-				(rdata->result == 0 || rdata->result == -EAGAIN) ?
-				rdata->got_bytes : rdata->result, true);
+	netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
 }
 
 static void
@@ -4532,7 +4530,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
 
 	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
 		 __func__, mid->mid, mid->mid_state, rdata->result,
-		 rdata->actual_len, rdata->subreq.len - rdata->subreq.transferred);
+		 rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred);
 
 	switch (mid->mid_state) {
 	case MID_RESPONSE_RECEIVED:
@@ -4554,6 +4552,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
 		break;
 	case MID_REQUEST_SUBMITTED:
 	case MID_RETRY_NEEDED:
+		__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
 		rdata->result = -EAGAIN;
 		if (server->sign && rdata->got_bytes)
 			/* reset bytes number since we can not check a sign */
@@ -4588,7 +4587,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
 				      rdata->req->cfile->fid.persistent_fid,
 				      tcon->tid, tcon->ses->Suid,
 				      rdata->subreq.start + rdata->subreq.transferred,
-				      rdata->actual_len,
+				      rdata->subreq.len - rdata->subreq.transferred,
 				      rdata->result);
 	} else
 		trace_smb3_read_done(rdata->rreq->debug_id,
@@ -4603,9 +4602,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
 			__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
 			rdata->result = 0;
 		} else {
-			if (rdata->got_bytes < rdata->actual_len &&
-			    rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes ==
-			    ictx->remote_i_size) {
+			size_t trans = rdata->subreq.transferred + rdata->got_bytes;
+			if (trans < rdata->subreq.len &&
+			    rdata->subreq.start + trans == ictx->remote_i_size) {
 				__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
 				rdata->result = 0;
 			}
@@ -4614,6 +4613,10 @@ smb2_readv_callback(struct mid_q_entry *mid)
 			      server->credits, server->in_flight, 0,
 			      cifs_trace_rw_credits_read_response_clear);
 	rdata->credits.value = 0;
+	rdata->subreq.transferred += rdata->got_bytes;
+	if (rdata->subreq.start + rdata->subreq.transferred >= rdata->subreq.rreq->i_size)
+		__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+
+	trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
 	INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
 	queue_work(cifsiod_wq, &rdata->subreq.work);
 	release_mid(mid);
@@ -4648,7 +4651,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
 	io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
 	io_parms.server = server = rdata->server;
 	io_parms.offset = subreq->start + subreq->transferred;
-	io_parms.length = rdata->actual_len;
+	io_parms.length = subreq->len - subreq->transferred;
 	io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
 	io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
 	io_parms.pid = rdata->req->pid;
@@ -4669,7 +4672,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
 	shdr = (struct smb2_hdr *)buf;
 
 	if (rdata->credits.value > 0) {
-		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->actual_len,
+		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length,
 						SMB2_MAX_BUFFER_SIZE));
 		credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
 		if (server->credits >= server->max_credits)
@@ -4697,7 +4700,8 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
 				    rdata->xid,
 				    io_parms.persistent_fid,
 				    io_parms.tcon->tid,
 				    io_parms.tcon->ses->Suid,
-				    io_parms.offset, rdata->actual_len, rc);
+				    io_parms.offset,
+				    subreq->len - subreq->transferred, rc);
 	}
 
 async_readv_out:
@@ -4880,6 +4884,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
 			      server->credits, server->in_flight, 0,
 			      cifs_trace_rw_credits_write_response_clear);
 	wdata->credits.value = 0;
+	trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
 	cifs_write_subrequest_terminated(wdata, result ?: written, true);
 	release_mid(mid);
 	trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 7bcc379014ca..80262a36030f 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -6,6 +6,7 @@
  */
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/folio_queue.h>
 #include "smbdirect.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
@@ -2463,6 +2464,8 @@ static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter,
 		start = 0;
 	}
 
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
 	return ret;
 }
 
@@ -2519,50 +2522,65 @@ static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
 		start = 0;
 	}
 
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
 	return ret;
 }
 
 /*
- * Extract folio fragments from an XARRAY-class iterator and add them to an
- * RDMA list.  The folios are not pinned.
+ * Extract folio fragments from a FOLIOQ-class iterator and add them to an RDMA
+ * list.  The folios are not pinned.
  */
-static ssize_t smb_extract_xarray_to_rdma(struct iov_iter *iter,
+static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
 					  struct smb_extract_to_rdma *rdma,
 					  ssize_t maxsize)
 {
-	struct xarray *xa = iter->xarray;
-	struct folio *folio;
-	loff_t start = iter->xarray_start + iter->iov_offset;
-	pgoff_t index = start / PAGE_SIZE;
+	const struct folio_queue *folioq = iter->folioq;
+	unsigned int slot = iter->folioq_slot;
 	ssize_t ret = 0;
-	size_t off, len;
-	XA_STATE(xas, xa, index);
+	size_t offset = iter->iov_offset;
 
-	rcu_read_lock();
+	BUG_ON(!folioq);
 
-	xas_for_each(&xas, folio, ULONG_MAX) {
-		if (xas_retry(&xas, folio))
-			continue;
-		if (WARN_ON(xa_is_value(folio)))
-			break;
-		if (WARN_ON(folio_test_hugetlb(folio)))
-			break;
+	if (slot >= folioq_nr_slots(folioq)) {
+		folioq = folioq->next;
+		if (WARN_ON_ONCE(!folioq))
+			return -EIO;
+		slot = 0;
+	}
 
-		off = offset_in_folio(folio, start);
-		len = min_t(size_t, maxsize, folio_size(folio) - off);
+	do {
+		struct folio *folio = folioq_folio(folioq, slot);
+		size_t fsize = folioq_folio_size(folioq, slot);
 
-		if (!smb_set_sge(rdma, folio_page(folio, 0), off, len)) {
-			rcu_read_unlock();
-			return -EIO;
+		if (offset < fsize) {
+			size_t part = umin(maxsize - ret, fsize - offset);
+
+			if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part))
+				return -EIO;
+
+			offset += part;
+			ret += part;
 		}
 
-		maxsize -= len;
-		ret += len;
-		if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0)
-			break;
-	}
+		if (offset >= fsize) {
+			offset = 0;
+			slot++;
+			if (slot >= folioq_nr_slots(folioq)) {
+				if (!folioq->next) {
+					WARN_ON_ONCE(ret < iter->count);
+					break;
+				}
+				folioq = folioq->next;
+				slot = 0;
+			}
+		}
+	} while (rdma->nr_sge < rdma->max_sge || maxsize > 0);
 
-	rcu_read_unlock();
+	iter->folioq = folioq;
+	iter->folioq_slot = slot;
+	iter->iov_offset = offset;
+	iter->count -= ret;
 	return ret;
 }
 
@@ -2590,17 +2608,15 @@ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
 	case ITER_KVEC:
 		ret = smb_extract_kvec_to_rdma(iter, rdma, len);
 		break;
-	case ITER_XARRAY:
-		ret = smb_extract_xarray_to_rdma(iter, rdma, len);
+	case ITER_FOLIOQ:
+		ret = smb_extract_folioq_to_rdma(iter, rdma, len);
 		break;
 	default:
 		WARN_ON_ONCE(1);
 		return -EIO;
 	}
 
-	if (ret > 0) {
-		iov_iter_advance(iter, ret);
-	} else if (ret < 0) {
+	if (ret < 0) {
 		while (rdma->nr_sge > before) {
 			struct ib_sge *sge = &rdma->sge[rdma->nr_sge--];