 fs/cifs/transport.c | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 520397a09616..b42050c68e6c 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1007,15 +1007,40 @@ cifs_cancelled_callback(struct mid_q_entry *mid)
 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
 {
 	uint index = 0;
+	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
+	struct TCP_Server_Info *server = NULL;
+	int i;
 
 	if (!ses)
 		return NULL;
 
-	/* round robin */
-	index = (uint)atomic_inc_return(&ses->chan_seq);
-
 	spin_lock(&ses->chan_lock);
-	index %= ses->chan_count;
+	for (i = 0; i < ses->chan_count; i++) {
+		server = ses->chans[i].server;
+		if (!server)
+			continue;
+
+		/*
+		 * strictly speaking, we should pick up req_lock to read
+		 * server->in_flight. But it shouldn't matter much here if we
+		 * race while reading this data. The worst that can happen is
+		 * that we could use a channel that's not least loaded. Avoiding
+		 * taking the lock could help reduce wait time, which is
+		 * important for this function
+		 */
+		if (server->in_flight < min_in_flight) {
+			min_in_flight = server->in_flight;
+			index = i;
+		}
+		if (server->in_flight > max_in_flight)
+			max_in_flight = server->in_flight;
+	}
+
+	/* if all channels are equally loaded, fall back to round-robin */
+	if (min_in_flight == max_in_flight) {
+		index = (uint)atomic_inc_return(&ses->chan_seq);
+		index %= ses->chan_count;
+	}
 	spin_unlock(&ses->chan_lock);
 
 	return ses->chans[index].server;
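
To experiment with the selection policy outside the kernel, here is a minimal
userspace sketch of the same idea: scan every channel, pick the one with the
fewest in-flight requests, and fall back to round-robin when all channels carry
the same load. The struct channel, struct session, and pick_channel names below
are hypothetical simplifications; the real function walks ses->chans[i].server
under ses->chan_lock and deliberately reads server->in_flight without req_lock,
as the in-diff comment explains.

#include <stdio.h>
#include <limits.h>

/* Hypothetical stand-ins for the kernel's per-channel state. */
struct channel {
	unsigned int in_flight;	/* requests currently outstanding */
};

struct session {
	struct channel chans[4];
	unsigned int chan_count;
	unsigned int chan_seq;	/* round-robin cursor */
};

/*
 * Pick the least loaded channel; if every channel is equally
 * loaded, advance the round-robin cursor so traffic still
 * rotates across channels.
 */
static unsigned int pick_channel(struct session *ses)
{
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	unsigned int index = 0, i;

	for (i = 0; i < ses->chan_count; i++) {
		unsigned int load = ses->chans[i].in_flight;

		if (load < min_in_flight) {
			min_in_flight = load;
			index = i;
		}
		if (load > max_in_flight)
			max_in_flight = load;
	}

	if (min_in_flight == max_in_flight)
		index = ses->chan_seq++ % ses->chan_count;

	return index;
}

int main(void)
{
	struct session ses = {
		.chans = { { 3 }, { 1 }, { 3 }, { 3 } },
		.chan_count = 4,
		.chan_seq = 0,
	};

	/* Channel 1 has the least load, so it is picked. */
	printf("picked channel %u\n", pick_channel(&ses));

	/* With uniform load, successive picks rotate round-robin. */
	ses.chans[1].in_flight = 3;
	printf("picked channel %u\n", pick_channel(&ses));
	printf("picked channel %u\n", pick_channel(&ses));
	return 0;
}

Note that min_in_flight == max_in_flight holds only when every channel carries
the same load, so the round-robin counter advances only in that case; whenever
the loads differ, chan_seq is left untouched and the least loaded channel wins.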