author    Chuck Lever <chuck.lever@oracle.com>    2023-11-21 11:40:46 -0500
committer Chuck Lever <chuck.lever@oracle.com>    2024-01-07 17:54:27 -0500
commit    bfb81535c2660d2bb8496e5cbb7480693188cd72 (patch)
tree      1a266e11818c5518f89282f18890de8b6eac2f18 /net/sunrpc/xprtrdma/svc_rdma_sendto.c
parent    f09c36c8dffc7dcf796b862bffdda5753bec84ef (diff)
svcrdma: Clean up locking
There's no need to protect llist_entry() with a spin lock.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
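The reasoning: in the kernel's llist API, llist_del_first() must be serialized against other deleters, which is what sc_send_lock provides here, but llist_entry() is only a container_of() wrapper that computes a pointer from the node already removed from the list. It touches no shared state, so it can run after the lock is dropped. A minimal sketch of the pattern with a hypothetical item pool (struct and names are illustrative, not the svcrdma code):

#include <linux/llist.h>
#include <linux/spinlock.h>

struct item {
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(item_pool);
static DEFINE_SPINLOCK(item_pool_lock);

/* Pop one item from the pool, or return NULL if the pool is empty. */
static struct item *item_pool_get(void)
{
	struct llist_node *node;

	/* llist_del_first() requires serializing concurrent deleters. */
	spin_lock(&item_pool_lock);
	node = llist_del_first(&item_pool);
	spin_unlock(&item_pool_lock);
	if (!node)
		return NULL;

	/* llist_entry() is just container_of(); no lock is needed here. */
	return llist_entry(node, struct item, node);
}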
Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_sendto.c')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 22c39ba923d2..09f5d0570bc9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -201,10 +201,11 @@ struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
 	spin_lock(&rdma->sc_send_lock);
 	node = llist_del_first(&rdma->sc_send_ctxts);
+	spin_unlock(&rdma->sc_send_lock);
 	if (!node)
 		goto out_empty;
+
 	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
-	spin_unlock(&rdma->sc_send_lock);
 out:
 	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
@@ -217,7 +218,6 @@ out:
 	return ctxt;
 out_empty:
-	spin_unlock(&rdma->sc_send_lock);
 	ctxt = svc_rdma_send_ctxt_alloc(rdma);
 	if (!ctxt)
 		return NULL;
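Read together, the two hunks leave the allocation path of svc_rdma_send_ctxt_get() looking roughly like this (reassembled from the context lines above; unrelated lines elided):

	spin_lock(&rdma->sc_send_lock);
	node = llist_del_first(&rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
	if (!node)
		goto out_empty;

	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	/* ... */
	return ctxt;

out_empty:
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	/* ... */

The only operation left under sc_send_lock is llist_del_first(); both the fast path and the empty-list fallback now run unlocked once the node (or the lack of one) is known.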