author		Shyam Prasad N <sprasad@microsoft.com>	2022-12-19 05:40:44 +0000
committer	Steve French <stfrench@microsoft.com>	2023-02-21 01:24:48 -0600
commit		ea90708d3cf3d0d92c02afe445ad463fb3c6bf10 (patch)
tree		91dca5f122908052f5acfa1155421eb5b509df19 /fs/cifs
parent		e7388b8a1a5bdf47a9a46803a5686387c1ce060d (diff)
cifs: use the least loaded channel for sending requests
Until now, we have used a simple round-robin approach to distribute requests
between the channels. This does not work well if the channels consume requests
at different speeds, even when their advertised speeds are the same.

This change allows the client to pick the channel with the least number of
requests currently in flight. It disregards the advertised link speed and
selects a channel based on its current load. When all channels are equally
loaded, fall back to the old round-robin method.

Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
Reviewed-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
Signed-off-by: Steve French <stfrench@microsoft.com>
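For illustration only (not part of the patch): a minimal user-space C sketch of
the selection policy described above. The struct channel array, chan_seq counter
and pick_channel_sketch() below are hypothetical stand-ins for the real
cifs_ses/TCP_Server_Info structures and their locking.

/* Illustrative sketch only; these names are not part of fs/cifs. */
#include <limits.h>
#include <stdio.h>

struct channel {
	unsigned int in_flight;	/* requests currently outstanding on this channel */
};

static unsigned int chan_seq;	/* round-robin counter, analogous to ses->chan_seq */

/*
 * Pick the channel with the fewest in-flight requests; if all channels are
 * equally loaded, fall back to round-robin, mirroring the logic in the patch.
 */
static unsigned int pick_channel_sketch(const struct channel *chans, unsigned int count)
{
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	unsigned int index = 0, i;

	for (i = 0; i < count; i++) {
		if (chans[i].in_flight < min_in_flight) {
			min_in_flight = chans[i].in_flight;
			index = i;
		}
		if (chans[i].in_flight > max_in_flight)
			max_in_flight = chans[i].in_flight;
	}

	/* all channels equally loaded: round-robin */
	if (min_in_flight == max_in_flight)
		index = chan_seq++ % count;

	return index;
}

int main(void)
{
	struct channel chans[] = { { .in_flight = 3 }, { .in_flight = 1 }, { .in_flight = 3 } };

	/* picks channel 1, the least loaded of the three */
	printf("picked channel %u\n", pick_channel_sketch(chans, 3));
	return 0;
}

In the kernel patch itself the same comparison runs under ses->chan_lock, and
server->in_flight is read without taking req_lock, accepting an occasional
stale value in exchange for less lock contention (see the comment in the diff
below).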
Diffstat (limited to 'fs/cifs')
-rw-r--r--	fs/cifs/transport.c	33
1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 520397a09616..b42050c68e6c 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1007,15 +1007,40 @@ cifs_cancelled_callback(struct mid_q_entry *mid)
 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
 {
 	uint index = 0;
+	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
+	struct TCP_Server_Info *server = NULL;
+	int i;
 
 	if (!ses)
 		return NULL;
 
-	/* round robin */
-	index = (uint)atomic_inc_return(&ses->chan_seq);
-
 	spin_lock(&ses->chan_lock);
-	index %= ses->chan_count;
+	for (i = 0; i < ses->chan_count; i++) {
+		server = ses->chans[i].server;
+		if (!server)
+			continue;
+
+		/*
+		 * strictly speaking, we should pick up req_lock to read
+		 * server->in_flight. But it shouldn't matter much here if we
+		 * race while reading this data. The worst that can happen is
+		 * that we could use a channel that's not least loaded. Avoiding
+		 * taking the lock could help reduce wait time, which is
+		 * important for this function
+		 */
+		if (server->in_flight < min_in_flight) {
+			min_in_flight = server->in_flight;
+			index = i;
+		}
+		if (server->in_flight > max_in_flight)
+			max_in_flight = server->in_flight;
+	}
+
+	/* if all channels are equally loaded, fall back to round-robin */
+	if (min_in_flight == max_in_flight) {
+		index = (uint)atomic_inc_return(&ses->chan_seq);
+		index %= ses->chan_count;
+	}
 	spin_unlock(&ses->chan_lock);
 
 	return ses->chans[index].server;