From 3821e232eb3b7591f07ce4c389313ab55ebee372 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:15 -0500 Subject: xprtrdma: Replace dprintk call sites in ERR_CHUNK path Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- net/sunrpc/xprtrdma/rpc_rdma.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 0f5120c7668f..c178f93aa40b 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1322,20 +1322,13 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, p = xdr_inline_decode(xdr, 2 * sizeof(*p)); if (!p) break; - dprintk("RPC: %s: server reports " - "version error (%u-%u), xid %08x\n", __func__, - be32_to_cpup(p), be32_to_cpu(*(p + 1)), - be32_to_cpu(rep->rr_xid)); + trace_xprtrdma_err_vers(rqst, p, p + 1); break; case err_chunk: - dprintk("RPC: %s: server reports " - "header decoding error, xid %08x\n", __func__, - be32_to_cpu(rep->rr_xid)); + trace_xprtrdma_err_chunk(rqst); break; default: - dprintk("RPC: %s: server reports " - "unrecognized error %d, xid %08x\n", __func__, - be32_to_cpup(p), be32_to_cpu(rep->rr_xid)); + trace_xprtrdma_err_unrecognized(rqst, p); } return -EIO; -- cgit v1.2.3-58-ga151 From af5865d2783958294179da56a4d073a9630b3068 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:21 -0500 Subject: xprtrdma: Introduce Receive completion IDs Set up a completion ID in each rpcrdma_rep. The ID is used to match an incoming Receive completion to a transport and to a previous ib_post_recv(). Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 46 +++++++---------------------------------- net/sunrpc/xprtrdma/verbs.c | 6 +++++- net/sunrpc/xprtrdma/xprt_rdma.h | 5 +++++ 3 files changed, 18 insertions(+), 39 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index d5e66428e27e..1c91c8e721e7 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -771,15 +771,17 @@ TRACE_EVENT(xprtrdma_post_recv, TP_ARGS(rep), TP_STRUCT__entry( - __field(const void *, rep) + __field(u32, cq_id) + __field(int, completion_id) ), TP_fast_assign( - __entry->rep = rep; + __entry->cq_id = rep->rr_cid.ci_queue_id; + __entry->completion_id = rep->rr_cid.ci_completion_id; ), - TP_printk("rep=%p", - __entry->rep + TP_printk("cq.id=%d cid=%d", + __entry->cq_id, __entry->completion_id ) ); @@ -845,6 +847,8 @@ TRACE_EVENT(xprtrdma_post_linv, ** Completion events **/ +DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); + TRACE_EVENT(xprtrdma_wc_send, TP_PROTO( const struct rpcrdma_sendctx *sc, @@ -876,40 +880,6 @@ TRACE_EVENT(xprtrdma_wc_send, ) ); -TRACE_EVENT(xprtrdma_wc_receive, - TP_PROTO( - const struct ib_wc *wc - ), - - TP_ARGS(wc), - - TP_STRUCT__entry( - __field(const void *, rep) - __field(u32, byte_len) - __field(unsigned int, status) - __field(u32, vendor_err) - ), - - TP_fast_assign( - __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep, - rr_cqe); - __entry->status = wc->status; - if (wc->status) { - __entry->byte_len = 0; - __entry->vendor_err = wc->vendor_err; - } else { - __entry->byte_len = wc->byte_len; - __entry->vendor_err = 0; - } - ), - - TP_printk("rep=%p %u bytes: %s (%u/0x%x)", - __entry->rep, __entry->byte_len, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); 
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index ad6e2e4994ce..2c8d2801ec4f 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -186,7 +186,7 @@ static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_xprt *r_xprt = cq->cq_context; /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_receive(wc); + trace_xprtrdma_wc_receive(wc, &rep->rr_cid); --r_xprt->rx_ep->re_receive_count; if (wc->status != IB_WC_SUCCESS) goto out_flushed; @@ -972,6 +972,9 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) goto out_free_regbuf; + rep->rr_cid.ci_completion_id = + atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); + xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), rdmab_length(rep->rr_rdmabuf)); rep->rr_cqe.done = rpcrdma_wc_receive; @@ -1411,6 +1414,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) if (!rep) break; + rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; trace_xprtrdma_post_recv(rep); rep->rr_recv_wr.next = wr; wr = &rep->rr_recv_wr; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 43974ef39a50..b94940bc67aa 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -53,6 +53,7 @@ #include /* RDMA verbs api */ #include /* rpc_xprt */ +#include /* completion IDs */ #include /* RPC/RDMA protocol */ #include /* xprt parameters */ @@ -93,6 +94,8 @@ struct rpcrdma_ep { unsigned int re_max_requests; /* depends on device */ unsigned int re_inline_send; /* negotiated */ unsigned int re_inline_recv; /* negotiated */ + + atomic_t re_completion_ids; }; /* Pre-allocate extra Work Requests for handling backward receives @@ -180,6 +183,8 @@ enum { struct rpcrdma_rep { struct ib_cqe rr_cqe; + struct rpc_rdma_cid rr_cid; + __be32 rr_xid; __be32 rr_vers; __be32 rr_proc; -- cgit v1.2.3-58-ga151 From b2e7467f26d7813d98cbaad5e62b54960f2c071b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:26 -0500 Subject: xprtrdma: Introduce Send completion IDs Set up a completion ID in each rpcrdma_req. The ID is used to match an incoming Send completion to a transport and to a previous ib_post_send(). Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 47 ++++++++--------------------------------- net/sunrpc/xprtrdma/verbs.c | 5 ++++- net/sunrpc/xprtrdma/xprt_rdma.h | 1 + 3 files changed, 14 insertions(+), 39 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 1c91c8e721e7..ab239f4f924e 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -735,8 +735,8 @@ TRACE_EVENT(xprtrdma_post_send, TP_ARGS(req), TP_STRUCT__entry( - __field(const void *, req) - __field(const void *, sc) + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, num_sge) @@ -745,20 +745,21 @@ TRACE_EVENT(xprtrdma_post_send, TP_fast_assign( const struct rpc_rqst *rqst = &req->rl_slot; + const struct rpcrdma_sendctx *sc = req->rl_sendctx; + __entry->cq_id = sc->sc_cid.ci_queue_id; + __entry->completion_id = sc->sc_cid.ci_completion_id; __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client ? 
rqst->rq_task->tk_client->cl_clid : -1; - __entry->req = req; - __entry->sc = req->rl_sendctx; __entry->num_sge = req->rl_wr.num_sge; __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; ), - TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s", + TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s", __entry->task_id, __entry->client_id, - __entry->req, __entry->sc, __entry->num_sge, - (__entry->num_sge == 1 ? "" : "s"), + __entry->cq_id, __entry->completion_id, + __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"), (__entry->signaled ? "signaled" : "") ) ); @@ -848,37 +849,7 @@ TRACE_EVENT(xprtrdma_post_linv, **/ DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); - -TRACE_EVENT(xprtrdma_wc_send, - TP_PROTO( - const struct rpcrdma_sendctx *sc, - const struct ib_wc *wc - ), - - TP_ARGS(sc, wc), - - TP_STRUCT__entry( - __field(const void *, req) - __field(const void *, sc) - __field(unsigned int, unmap_count) - __field(unsigned int, status) - __field(unsigned int, vendor_err) - ), - - TP_fast_assign( - __entry->req = sc->sc_req; - __entry->sc = sc; - __entry->unmap_count = sc->sc_unmap_count; - __entry->status = wc->status; - __entry->vendor_err = __entry->status ? wc->vendor_err : 0; - ), - - TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)", - __entry->req, __entry->sc, __entry->unmap_count, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 2c8d2801ec4f..63837b5d14e5 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -167,7 +167,7 @@ static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_xprt *r_xprt = cq->cq_context; /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_send(sc, wc); + trace_xprtrdma_wc_send(wc, &sc->sc_cid); rpcrdma_sendctx_put_locked(r_xprt, sc); rpcrdma_flush_disconnect(r_xprt, wc); } @@ -643,6 +643,9 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep) return NULL; sc->sc_cqe.done = rpcrdma_wc_send; + sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id; + sc->sc_cid.ci_completion_id = + atomic_inc_return(&ep->re_completion_ids); return sc; } diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index b94940bc67aa..4eb8e32b9f4a 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -216,6 +216,7 @@ enum { struct rpcrdma_req; struct rpcrdma_sendctx { struct ib_cqe sc_cqe; + struct rpc_rdma_cid sc_cid; struct rpcrdma_req *sc_req; unsigned int sc_unmap_count; struct ib_sge sc_sges[]; -- cgit v1.2.3-58-ga151 From 5ecef9c84366955f61e379140c1065d576c66ada Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:31 -0500 Subject: xprtrdma: Introduce FRWR completion IDs Set up a completion ID in each rpcrdma_frwr. The ID is used to match an incoming completion to a transport (CQ) and other MR-related activity. 
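[Editor's illustration, not part of the patch series: a small userspace sketch of the (queue ID, completion ID) pair these three patches attach to Receive, Send, and FRWR work requests. The type and function names below are invented for the example; the real code stores the pair in struct rpc_rdma_cid and reports it in the "cq.id=... cid=..." tracepoint format shown in the diffs, instead of printing kernel memory addresses.]

#include <stdio.h>

/* simplified stand-in for the kernel's completion ID pair */
struct example_cid {
	unsigned int queue_id;	/* which completion queue (cq.id=) */
	int completion_id;	/* which posted work request (cid=) */
};

/* stamp an ID at post time, e.g. from a per-endpoint counter */
static void example_cid_init(struct example_cid *cid,
			     unsigned int cq_id, int serial)
{
	cid->queue_id = cq_id;
	cid->completion_id = serial;
}

/* at completion time, report the pair rather than a pointer */
static void example_report_completion(const struct example_cid *cid,
				      int status)
{
	printf("cq.id=%u cid=%d status=%d\n",
	       cid->queue_id, cid->completion_id, status);
}

int main(void)
{
	struct example_cid cid;

	example_cid_init(&cid, 3, 128);
	example_report_completion(&cid, 0);
	return 0;
}
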
Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 44 ++++------------------------------------- net/sunrpc/xprtrdma/frwr_ops.c | 29 ++++++++++++++++++++------- net/sunrpc/xprtrdma/xprt_rdma.h | 1 + 3 files changed, 27 insertions(+), 47 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index ab239f4f924e..9e30f8aa3562 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -261,41 +261,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event, ), \ TP_ARGS(task, mr, nsegs)) -DECLARE_EVENT_CLASS(xprtrdma_frwr_done, - TP_PROTO( - const struct ib_wc *wc, - const struct rpcrdma_frwr *frwr - ), - - TP_ARGS(wc, frwr), - - TP_STRUCT__entry( - __field(u32, mr_id) - __field(unsigned int, status) - __field(unsigned int, vendor_err) - ), - - TP_fast_assign( - __entry->mr_id = frwr->fr_mr->res.id; - __entry->status = wc->status; - __entry->vendor_err = __entry->status ? wc->vendor_err : 0; - ), - - TP_printk( - "mr.id=%u: %s (%u/0x%x)", - __entry->mr_id, rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - -#define DEFINE_FRWR_DONE_EVENT(name) \ - DEFINE_EVENT(xprtrdma_frwr_done, name, \ - TP_PROTO( \ - const struct ib_wc *wc, \ - const struct rpcrdma_frwr *frwr \ - ), \ - TP_ARGS(wc, frwr)) - TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); TRACE_DEFINE_ENUM(DMA_TO_DEVICE); TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); @@ -850,11 +815,10 @@ TRACE_EVENT(xprtrdma_post_linv, DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); - -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done); TRACE_EVENT(xprtrdma_frwr_alloc, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 44888f5badef..2cc6862a52dc 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -363,12 +363,21 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) container_of(cqe, struct rpcrdma_frwr, fr_cqe); /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_fastreg(wc, frwr); + trace_xprtrdma_wc_fastreg(wc, &frwr->fr_cid); /* The MR will get recycled when the associated req is retransmitted */ rpcrdma_flush_disconnect(cq->cq_context, wc); } +static void frwr_cid_init(struct rpcrdma_ep *ep, + struct rpcrdma_frwr *frwr) +{ + struct rpc_rdma_cid *cid = &frwr->fr_cid; + + cid->ci_queue_id = ep->re_attr.send_cq->res.id; + cid->ci_completion_id = frwr->fr_mr->res.id; +} + /** * frwr_send - post Send WRs containing the RPC Call message * @r_xprt: controlling transport instance @@ -385,6 +394,7 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) */ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) { + struct rpcrdma_ep *ep = r_xprt->rx_ep; struct ib_send_wr *post_wr; struct rpcrdma_mr *mr; @@ -395,6 +405,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) frwr = &mr->frwr; frwr->fr_cqe.done = frwr_wc_fastreg; + frwr_cid_init(ep, frwr); frwr->fr_regwr.wr.next = post_wr; frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe; frwr->fr_regwr.wr.num_sge = 0; @@ -404,7 +415,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req 
*req) post_wr = &frwr->fr_regwr.wr; } - return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL); + return ib_post_send(ep->re_id->qp, post_wr, NULL); } /** @@ -448,7 +459,7 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr); /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_li(wc, frwr); + trace_xprtrdma_wc_li(wc, &frwr->fr_cid); __frwr_release_mr(wc, mr); rpcrdma_flush_disconnect(cq->cq_context, wc); @@ -469,7 +480,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr); /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_li_wake(wc, frwr); + trace_xprtrdma_wc_li_wake(wc, &frwr->fr_cid); __frwr_release_mr(wc, mr); complete(&frwr->fr_linv_done); @@ -490,6 +501,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) { struct ib_send_wr *first, **prev, *last; + struct rpcrdma_ep *ep = r_xprt->rx_ep; const struct ib_send_wr *bad_wr; struct rpcrdma_frwr *frwr; struct rpcrdma_mr *mr; @@ -509,6 +521,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) frwr = &mr->frwr; frwr->fr_cqe.done = frwr_wc_localinv; + frwr_cid_init(ep, frwr); last = &frwr->fr_invwr; last->next = NULL; last->wr_cqe = &frwr->fr_cqe; @@ -534,7 +547,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) * unless re_id->qp is a valid pointer. */ bad_wr = NULL; - rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr); + rc = ib_post_send(ep->re_id->qp, first, &bad_wr); /* The final LOCAL_INV WR in the chain is supposed to * do the wake. If it was never posted, the wake will @@ -574,7 +587,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_rep *rep = mr->mr_req->rl_reply; /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_li_done(wc, frwr); + trace_xprtrdma_wc_li_done(wc, &frwr->fr_cid); __frwr_release_mr(wc, mr); /* Ensure @rep is generated before __frwr_release_mr */ @@ -597,6 +610,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc) void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) { struct ib_send_wr *first, *last, **prev; + struct rpcrdma_ep *ep = r_xprt->rx_ep; const struct ib_send_wr *bad_wr; struct rpcrdma_frwr *frwr; struct rpcrdma_mr *mr; @@ -614,6 +628,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) frwr = &mr->frwr; frwr->fr_cqe.done = frwr_wc_localinv; + frwr_cid_init(ep, frwr); last = &frwr->fr_invwr; last->next = NULL; last->wr_cqe = &frwr->fr_cqe; @@ -639,7 +654,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) * unless re_id->qp is a valid pointer. 
*/ bad_wr = NULL; - rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr); + rc = ib_post_send(ep->re_id->qp, first, &bad_wr); if (!rc) return; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 4eb8e32b9f4a..cef9d0f2e2c8 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -231,6 +231,7 @@ struct rpcrdma_sendctx { struct rpcrdma_frwr { struct ib_mr *fr_mr; struct ib_cqe fr_cqe; + struct rpc_rdma_cid fr_cid; struct completion fr_linv_done; union { struct ib_reg_wr fr_regwr; -- cgit v1.2.3-58-ga151 From 36a55edfc3d5b1c2735c088bcb06967de103f299 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:37 -0500 Subject: xprtrdma: Clean up trace_xprtrdma_post_linv - Replace the display of kernel memory addresses - Add "_err" to the end of its name to indicate that it's a tracepoint that fires only when there's an error Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 16 +++++++++------- net/sunrpc/xprtrdma/frwr_ops.c | 4 ++-- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 9e30f8aa3562..b0750c0d2753 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -784,7 +784,7 @@ TRACE_EVENT(xprtrdma_post_recvs, ) ); -TRACE_EVENT(xprtrdma_post_linv, +TRACE_EVENT(xprtrdma_post_linv_err, TP_PROTO( const struct rpcrdma_req *req, int status @@ -793,19 +793,21 @@ TRACE_EVENT(xprtrdma_post_linv, TP_ARGS(req, status), TP_STRUCT__entry( - __field(const void *, req) + __field(unsigned int, task_id) + __field(unsigned int, client_id) __field(int, status) - __field(u32, xid) ), TP_fast_assign( - __entry->req = req; + const struct rpc_task *task = req->rl_slot.rq_task; + + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; __entry->status = status; - __entry->xid = be32_to_cpu(req->rl_slot.rq_xid); ), - TP_printk("req=%p xid=0x%08x status=%d", - __entry->req, __entry->xid, __entry->status + TP_printk("task:%u@%u status=%d", + __entry->task_id, __entry->client_id, __entry->status ) ); diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 2cc6862a52dc..76322b1acf3d 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -560,7 +560,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) /* Recycle MRs in the LOCAL_INV chain that did not get posted. */ - trace_xprtrdma_post_linv(req, rc); + trace_xprtrdma_post_linv_err(req, rc); while (bad_wr) { frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr); @@ -660,7 +660,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) /* Recycle MRs in the LOCAL_INV chain that did not get posted. 
*/ - trace_xprtrdma_post_linv(req, rc); + trace_xprtrdma_post_linv_err(req, rc); while (bad_wr) { frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr); mr = container_of(frwr, struct rpcrdma_mr, frwr); -- cgit v1.2.3-58-ga151 From 3a9568fedccc6cf26c1a87621c3bfed7b7432119 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:42 -0500 Subject: xprtrdma: Clean up reply parsing error tracepoints - Rename the tracepoints with the "_err" suffix to indicate these are rare error events - Replace display of kernel memory addresses - Tie the XID and error to a connection IP address instead Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 27 ++++++++++++++------------- net/sunrpc/xprtrdma/rpc_rdma.c | 10 +++++----- 2 files changed, 19 insertions(+), 18 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index b0750c0d2753..93d717d8139f 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -60,7 +60,7 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) -DECLARE_EVENT_CLASS(xprtrdma_reply_event, +DECLARE_EVENT_CLASS(xprtrdma_reply_class, TP_PROTO( const struct rpcrdma_rep *rep ), @@ -68,29 +68,30 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_event, TP_ARGS(rep), TP_STRUCT__entry( - __field(const void *, rep) - __field(const void *, r_xprt) __field(u32, xid) __field(u32, version) __field(u32, proc) + __string(addr, rpcrdma_addrstr(rep->rr_rxprt)) + __string(port, rpcrdma_portstr(rep->rr_rxprt)) ), TP_fast_assign( - __entry->rep = rep; - __entry->r_xprt = rep->rr_rxprt; __entry->xid = be32_to_cpu(rep->rr_xid); __entry->version = be32_to_cpu(rep->rr_vers); __entry->proc = be32_to_cpu(rep->rr_proc); + __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt)); + __assign_str(port, rpcrdma_portstr(rep->rr_rxprt)); ), - TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u", - __entry->r_xprt, __entry->xid, __entry->rep, - __entry->version, __entry->proc + TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u", + __get_str(addr), __get_str(port), + __entry->xid, __entry->version, __entry->proc ) ); #define DEFINE_REPLY_EVENT(name) \ - DEFINE_EVENT(xprtrdma_reply_event, name, \ + DEFINE_EVENT(xprtrdma_reply_class, \ + xprtrdma_reply_##name##_err, \ TP_PROTO( \ const struct rpcrdma_rep *rep \ ), \ @@ -1030,10 +1031,10 @@ TRACE_EVENT(xprtrdma_defer_cmp, ) ); -DEFINE_REPLY_EVENT(xprtrdma_reply_vers); -DEFINE_REPLY_EVENT(xprtrdma_reply_rqst); -DEFINE_REPLY_EVENT(xprtrdma_reply_short); -DEFINE_REPLY_EVENT(xprtrdma_reply_hdr); +DEFINE_REPLY_EVENT(vers); +DEFINE_REPLY_EVENT(rqst); +DEFINE_REPLY_EVENT(short); +DEFINE_REPLY_EVENT(hdr); TRACE_EVENT(xprtrdma_err_vers, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index c178f93aa40b..29f847c8f609 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (c) 2014-2017 Oracle. All rights reserved. + * Copyright (c) 2014-2020, Oracle and/or its affiliates. * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -1369,7 +1369,7 @@ out: return; out_badheader: - trace_xprtrdma_reply_hdr(rep); + trace_xprtrdma_reply_hdr_err(rep); r_xprt->rx_stats.bad_reply_count++; rqst->rq_task->tk_status = status; status = 0; @@ -1462,16 +1462,16 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) return; out_badversion: - trace_xprtrdma_reply_vers(rep); + trace_xprtrdma_reply_vers_err(rep); goto out; out_norqst: spin_unlock(&xprt->queue_lock); - trace_xprtrdma_reply_rqst(rep); + trace_xprtrdma_reply_rqst_err(rep); goto out; out_shortreply: - trace_xprtrdma_reply_short(rep); + trace_xprtrdma_reply_short_err(rep); out: rpcrdma_recv_buffer_put(rep); -- cgit v1.2.3-58-ga151 From 03ffd92494a53dcc4b98c909ae1f6787d1fec646 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:47 -0500 Subject: xprtrdma: Clean up tracepoints in the reply path Replace unnecessary display of kernel memory addresses. Also, there are no longer any trace_xprtrdma_defer_cmp() call sites. And remove the trace_xprtrdma_leaked_rep() tracepoint because there doesn't seem to be an overwhelming need to have a tracepoint for catching a software bug that has long since been fixed. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 66 ++---------------------------------------- net/sunrpc/xprtrdma/rpc_rdma.c | 6 ++-- 2 files changed, 5 insertions(+), 67 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 93d717d8139f..c28bf17e769b 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -974,17 +974,14 @@ TRACE_EVENT(xprtrdma_reply, TP_PROTO( const struct rpc_task *task, const struct rpcrdma_rep *rep, - const struct rpcrdma_req *req, unsigned int credits ), - TP_ARGS(task, rep, req, credits), + TP_ARGS(task, rep, credits), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, rep) - __field(const void *, req) __field(u32, xid) __field(unsigned int, credits) ), @@ -992,42 +989,13 @@ TRACE_EVENT(xprtrdma_reply, TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->rep = rep; - __entry->req = req; __entry->xid = be32_to_cpu(rep->rr_xid); __entry->credits = credits; ), - TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->credits, __entry->rep, __entry->req - ) -); - -TRACE_EVENT(xprtrdma_defer_cmp, - TP_PROTO( - const struct rpcrdma_rep *rep - ), - - TP_ARGS(rep), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(const void *, rep) - __field(u32, xid) - ), - - TP_fast_assign( - __entry->task_id = rep->rr_rqst->rq_task->tk_pid; - __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid; - __entry->rep = rep; - __entry->xid = be32_to_cpu(rep->rr_xid); - ), - - TP_printk("task:%u@%u xid=0x%08x rep=%p", + TP_printk("task:%u@%u xid=0x%08x credits=%u", __entry->task_id, __entry->client_id, __entry->xid, - __entry->rep + __entry->credits ) ); @@ -1212,34 +1180,6 @@ TRACE_EVENT(xprtrdma_cb_setup, DEFINE_CB_EVENT(xprtrdma_cb_call); DEFINE_CB_EVENT(xprtrdma_cb_reply); -TRACE_EVENT(xprtrdma_leaked_rep, - TP_PROTO( - const struct rpc_rqst *rqst, - const struct rpcrdma_rep *rep - ), - - TP_ARGS(rqst, rep), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(u32, xid) - 
__field(const void *, rep) - ), - - TP_fast_assign( - __entry->task_id = rqst->rq_task->tk_pid; - __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); - __entry->rep = rep; - ), - - TP_printk("task:%u@%u xid=0x%08x rep=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->rep - ) -); - /** ** Server-side RPC/RDMA events **/ diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 29f847c8f609..8078559bdc31 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1443,14 +1443,12 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) rpcrdma_post_recvs(r_xprt, false); req = rpcr_to_rdmar(rqst); - if (req->rl_reply) { - trace_xprtrdma_leaked_rep(rqst, req->rl_reply); + if (unlikely(req->rl_reply)) rpcrdma_recv_buffer_put(req->rl_reply); - } req->rl_reply = rep; rep->rr_rqst = rqst; - trace_xprtrdma_reply(rqst->rq_task, rep, req, credits); + trace_xprtrdma_reply(rqst->rq_task, rep, credits); if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) frwr_reminv(rep, &req->rl_registered); -- cgit v1.2.3-58-ga151 From d11e934606ef6ce37a4bcbe89f34faf37347abb1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:53 -0500 Subject: xprtrdma: Clean up xprtrdma callback tracepoints - Replace displayed kernel memory addresses - Tie the XID and event with the peer's IP address Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 31 ++++++++++++++++--------------- net/sunrpc/xprtrdma/backchannel.c | 6 +++--- 2 files changed, 19 insertions(+), 18 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index c28bf17e769b..6bdbe1165270 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -313,38 +313,39 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, ), \ TP_ARGS(mr)) -DECLARE_EVENT_CLASS(xprtrdma_cb_event, +DECLARE_EVENT_CLASS(xprtrdma_callback_class, TP_PROTO( + const struct rpcrdma_xprt *r_xprt, const struct rpc_rqst *rqst ), - TP_ARGS(rqst), + TP_ARGS(r_xprt, rqst), TP_STRUCT__entry( - __field(const void *, rqst) - __field(const void *, rep) - __field(const void *, req) __field(u32, xid) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( - __entry->rqst = rqst; - __entry->req = rpcr_to_rdmar(rqst); - __entry->rep = rpcr_to_rdmar(rqst)->rl_reply; __entry->xid = be32_to_cpu(rqst->rq_xid); + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p", - __entry->xid, __entry->rqst, __entry->req, __entry->rep + TP_printk("peer=[%s]:%s xid=0x%08x", + __get_str(addr), __get_str(port), __entry->xid ) ); -#define DEFINE_CB_EVENT(name) \ - DEFINE_EVENT(xprtrdma_cb_event, name, \ +#define DEFINE_CALLBACK_EVENT(name) \ + DEFINE_EVENT(xprtrdma_callback_class, \ + xprtrdma_cb_##name, \ TP_PROTO( \ + const struct rpcrdma_xprt *r_xprt, \ const struct rpc_rqst *rqst \ ), \ - TP_ARGS(rqst)) + TP_ARGS(r_xprt, rqst)) /** ** Connection events @@ -1177,8 +1178,8 @@ TRACE_EVENT(xprtrdma_cb_setup, ) ); -DEFINE_CB_EVENT(xprtrdma_cb_call); -DEFINE_CB_EVENT(xprtrdma_cb_reply); +DEFINE_CALLBACK_EVENT(call); +DEFINE_CALLBACK_EVENT(reply); /** ** Server-side RPC/RDMA events diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index c92c1aac270a..946edf2db646 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ 
b/net/sunrpc/xprtrdma/backchannel.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2015 Oracle. All rights reserved. + * Copyright (c) 2015-2020, Oracle and/or its affiliates. * * Support for backward direction RPCs on RPC/RDMA. */ @@ -82,7 +82,7 @@ static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst) &rqst->rq_snd_buf, rpcrdma_noch_pullup)) return -EIO; - trace_xprtrdma_cb_reply(rqst); + trace_xprtrdma_cb_reply(r_xprt, rqst); return 0; } @@ -260,7 +260,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, */ req = rpcr_to_rdmar(rqst); req->rl_reply = rep; - trace_xprtrdma_cb_call(rqst); + trace_xprtrdma_cb_call(r_xprt, rqst); /* Queue rqst for ULP's callback service */ bc_serv = xprt->bc_serv; -- cgit v1.2.3-58-ga151 From 0307cdec7c3412d7665363ab0cd61fccf82bfb2d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:58 -0500 Subject: xprtrdma: Clean up trace_xprtrdma_nomrs() - Rename it following the "_err" suffix convention - Replace display of kernel memory addresses - Tie MR exhaustion to a peer IP address, similar to the createmrs tracepoint Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 19 ++++++++++--------- net/sunrpc/xprtrdma/rpc_rdma.c | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 6bdbe1165270..4fcda2a25bb8 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -545,32 +545,33 @@ TRACE_EVENT(xprtrdma_mr_get, ) ); -TRACE_EVENT(xprtrdma_nomrs, +TRACE_EVENT(xprtrdma_nomrs_err, TP_PROTO( + const struct rpcrdma_xprt *r_xprt, const struct rpcrdma_req *req ), - TP_ARGS(req), + TP_ARGS(r_xprt, req), TP_STRUCT__entry( - __field(const void *, req) __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(u32, xid) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( const struct rpc_rqst *rqst = &req->rl_slot; - __entry->req = req; __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("task:%u@%u xid=0x%08x req=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->req + TP_printk("peer=[%s]:%s task:%u@%u", + __get_str(addr), __get_str(port), + __entry->task_id, __entry->client_id ) ); diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 8078559bdc31..f27eb2322b38 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -323,7 +323,7 @@ static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt, return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr); out_getmr_err: - trace_xprtrdma_nomrs(req); + trace_xprtrdma_nomrs_err(r_xprt, req); xprt_wait_for_buffer_space(&r_xprt->rx_xprt); rpcrdma_mrs_refresh(r_xprt); return ERR_PTR(-EAGAIN); -- cgit v1.2.3-58-ga151 From 7703db978d4cf7c51426183b7c0d03c039757a44 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:40:03 -0500 Subject: xprtrdma: Display the task ID when reporting MR events Tie each MR event to the requesting rpc_task to make it easier to follow MR ownership and control flow. 
MR unmapping and recycling can happen in the background, after an MR's mr_req field is stale, so set up a separate tracepoint class for those events. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 96 +++++++++++++++++++++++++----------------- net/sunrpc/xprtrdma/frwr_ops.c | 1 - net/sunrpc/xprtrdma/rpc_rdma.c | 1 - 3 files changed, 58 insertions(+), 40 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 4fcda2a25bb8..166bbeef996c 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -274,7 +274,55 @@ TRACE_DEFINE_ENUM(DMA_NONE); { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ { DMA_NONE, "NONE" }) -DECLARE_EVENT_CLASS(xprtrdma_mr, +DECLARE_EVENT_CLASS(xprtrdma_mr_class, + TP_PROTO( + const struct rpcrdma_mr *mr + ), + + TP_ARGS(mr), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, mr_id) + __field(int, nents) + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + __field(u32, dir) + ), + + TP_fast_assign( + const struct rpcrdma_req *req = mr->mr_req; + const struct rpc_task *task = req->rl_slot.rq_task; + + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->nents = mr->mr_nents; + __entry->handle = mr->mr_handle; + __entry->length = mr->mr_length; + __entry->offset = mr->mr_offset; + __entry->dir = mr->mr_dir; + ), + + TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, + __entry->mr_id, __entry->nents, __entry->length, + (unsigned long long)__entry->offset, __entry->handle, + xprtrdma_show_direction(__entry->dir) + ) +); + +#define DEFINE_MR_EVENT(name) \ + DEFINE_EVENT(xprtrdma_mr_class, \ + xprtrdma_mr_##name, \ + TP_PROTO( \ + const struct rpcrdma_mr *mr \ + ), \ + TP_ARGS(mr)) + +DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class, TP_PROTO( const struct rpcrdma_mr *mr ), @@ -306,11 +354,12 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, ) ); -#define DEFINE_MR_EVENT(name) \ - DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \ - TP_PROTO( \ - const struct rpcrdma_mr *mr \ - ), \ +#define DEFINE_ANON_MR_EVENT(name) \ + DEFINE_EVENT(xprtrdma_anonymous_mr_class, \ + xprtrdma_mr_##name, \ + TP_PROTO( \ + const struct rpcrdma_mr *mr \ + ), \ TP_ARGS(mr)) DECLARE_EVENT_CLASS(xprtrdma_callback_class, @@ -516,35 +565,6 @@ TRACE_EVENT(xprtrdma_createmrs, ) ); -TRACE_EVENT(xprtrdma_mr_get, - TP_PROTO( - const struct rpcrdma_req *req - ), - - TP_ARGS(req), - - TP_STRUCT__entry( - __field(const void *, req) - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(u32, xid) - ), - - TP_fast_assign( - const struct rpc_rqst *rqst = &req->rl_slot; - - __entry->req = req; - __entry->task_id = rqst->rq_task->tk_pid; - __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); - ), - - TP_printk("task:%u@%u xid=0x%08x req=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->req - ) -); - TRACE_EVENT(xprtrdma_nomrs_err, TP_PROTO( const struct rpcrdma_xprt *r_xprt, @@ -946,9 +966,9 @@ TRACE_EVENT(xprtrdma_frwr_maperr, DEFINE_MR_EVENT(localinv); DEFINE_MR_EVENT(map); -DEFINE_MR_EVENT(unmap); -DEFINE_MR_EVENT(reminv); -DEFINE_MR_EVENT(recycle); + +DEFINE_ANON_MR_EVENT(unmap); +DEFINE_ANON_MR_EVENT(recycle); TRACE_EVENT(xprtrdma_dma_maperr, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/frwr_ops.c 
b/net/sunrpc/xprtrdma/frwr_ops.c index 76322b1acf3d..cb2f92409c2f 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -431,7 +431,6 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs) list_for_each_entry(mr, mrs, mr_list) if (mr->mr_handle == rep->rr_inv_rkey) { list_del_init(&mr->mr_list); - trace_xprtrdma_mr_reminv(mr); rpcrdma_mr_put(mr); break; /* only one invalidated MR per RPC */ } diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index f27eb2322b38..9ed89872ec75 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -315,7 +315,6 @@ static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt, *mr = rpcrdma_mr_get(r_xprt); if (!*mr) goto out_getmr_err; - trace_xprtrdma_mr_get(req); (*mr)->mr_req = req; } -- cgit v1.2.3-58-ga151 From 8e24e191d44f49f08f857f0ebc6fe91961cd1a09 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:40:08 -0500 Subject: xprtrdma: Trace unmap_sync calls ->buf_free is called nearly once per RPC. Only rarely does xprt_rdma_free() have to do anything, thus tracing every one of these calls seems unnecessary. Instead, just throw a trace event when that one occasional RPC still has MRs that need to be released. xprt_rdma_free() is further micro-optimized to reduce the amount of work done in the common case. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 22 ++++++++++++++++++++++ net/sunrpc/xprtrdma/transport.c | 7 ++++--- 2 files changed, 26 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 166bbeef996c..69e1caf7e882 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1167,6 +1167,28 @@ TRACE_EVENT(xprtrdma_decode_seg, ) ); +TRACE_EVENT(xprtrdma_mrs_zap, + TP_PROTO( + const struct rpc_task *task + ), + + TP_ARGS(task), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + ), + + TP_printk("task:%u@%u", + __entry->task_id, __entry->client_id + ) +); + /** ** Callback events **/ diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 8915e42240d3..bb3ed3db6c0a 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -599,11 +599,12 @@ static void xprt_rdma_free(struct rpc_task *task) { struct rpc_rqst *rqst = task->tk_rqstp; - struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); struct rpcrdma_req *req = rpcr_to_rdmar(rqst); - if (!list_empty(&req->rl_registered)) - frwr_unmap_sync(r_xprt, req); + if (unlikely(!list_empty(&req->rl_registered))) { + trace_xprtrdma_mrs_zap(task); + frwr_unmap_sync(rpcx_to_rdmax(rqst->rq_xprt), req); + } /* XXX: If the RPC is completing because of a signal and * not because a reply was received, we ought to ensure -- cgit v1.2.3-58-ga151 From ef2be5918ff57bea9cd1e63968c73b1358a765ca Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:40:14 -0500 Subject: xprtrdma: Move rpcrdma_mr_put() Clean up: This function is now invoked only in frwr_ops.c. The move enables deduplication of the trace_xprtrdma_mr_unmap() call site. 
Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- net/sunrpc/xprtrdma/frwr_ops.c | 41 ++++++++++++++++++++++++++++------------- net/sunrpc/xprtrdma/verbs.c | 19 ------------------- net/sunrpc/xprtrdma/xprt_rdma.h | 1 - 3 files changed, 28 insertions(+), 33 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index cb2f92409c2f..e93b3457b958 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -65,18 +65,23 @@ void frwr_release_mr(struct rpcrdma_mr *mr) kfree(mr); } -static void frwr_mr_recycle(struct rpcrdma_mr *mr) +static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) { - struct rpcrdma_xprt *r_xprt = mr->mr_xprt; - - trace_xprtrdma_mr_recycle(mr); - if (mr->mr_dir != DMA_NONE) { trace_xprtrdma_mr_unmap(mr); ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device, mr->mr_sg, mr->mr_nents, mr->mr_dir); mr->mr_dir = DMA_NONE; } +} + +static void frwr_mr_recycle(struct rpcrdma_mr *mr) +{ + struct rpcrdma_xprt *r_xprt = mr->mr_xprt; + + trace_xprtrdma_mr_recycle(mr); + + frwr_mr_unmap(r_xprt, mr); spin_lock(&r_xprt->rx_buf.rb_lock); list_del(&mr->mr_all); @@ -86,6 +91,16 @@ static void frwr_mr_recycle(struct rpcrdma_mr *mr) frwr_release_mr(mr); } +static void frwr_mr_put(struct rpcrdma_mr *mr) +{ + frwr_mr_unmap(mr->mr_xprt, mr); + + /* The MR is returned to the req's MR free list instead + * of to the xprt's MR free list. No spinlock is needed. + */ + rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs); +} + /* frwr_reset - Place MRs back on the free list * @req: request to reset * @@ -101,7 +116,7 @@ void frwr_reset(struct rpcrdma_req *req) struct rpcrdma_mr *mr; while ((mr = rpcrdma_mr_pop(&req->rl_registered))) - rpcrdma_mr_put(mr); + frwr_mr_put(mr); } /** @@ -431,17 +446,17 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs) list_for_each_entry(mr, mrs, mr_list) if (mr->mr_handle == rep->rr_inv_rkey) { list_del_init(&mr->mr_list); - rpcrdma_mr_put(mr); + frwr_mr_put(mr); break; /* only one invalidated MR per RPC */ } } -static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr) +static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr) { if (wc->status != IB_WC_SUCCESS) frwr_mr_recycle(mr); else - rpcrdma_mr_put(mr); + frwr_mr_put(mr); } /** @@ -459,7 +474,7 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) /* WARNING: Only wr_cqe and status are reliable at this point */ trace_xprtrdma_wc_li(wc, &frwr->fr_cid); - __frwr_release_mr(wc, mr); + frwr_mr_done(wc, mr); rpcrdma_flush_disconnect(cq->cq_context, wc); } @@ -480,7 +495,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) /* WARNING: Only wr_cqe and status are reliable at this point */ trace_xprtrdma_wc_li_wake(wc, &frwr->fr_cid); - __frwr_release_mr(wc, mr); + frwr_mr_done(wc, mr); complete(&frwr->fr_linv_done); rpcrdma_flush_disconnect(cq->cq_context, wc); @@ -587,9 +602,9 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc) /* WARNING: Only wr_cqe and status are reliable at this point */ trace_xprtrdma_wc_li_done(wc, &frwr->fr_cid); - __frwr_release_mr(wc, mr); + frwr_mr_done(wc, mr); - /* Ensure @rep is generated before __frwr_release_mr */ + /* Ensure @rep is generated before frwr_mr_done */ smp_rmb(); rpcrdma_complete_rqst(rep); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 63837b5d14e5..ec912cf9c618 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -1184,25 +1184,6 @@ 
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt) return mr; } -/** - * rpcrdma_mr_put - DMA unmap an MR and release it - * @mr: MR to release - * - */ -void rpcrdma_mr_put(struct rpcrdma_mr *mr) -{ - struct rpcrdma_xprt *r_xprt = mr->mr_xprt; - - if (mr->mr_dir != DMA_NONE) { - trace_xprtrdma_mr_unmap(mr); - ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device, - mr->mr_sg, mr->mr_nents, mr->mr_dir); - mr->mr_dir = DMA_NONE; - } - - rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs); -} - /** * rpcrdma_buffer_get - Get a request buffer * @buffers: Buffer pool from which to obtain a buffer diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index cef9d0f2e2c8..6a45bf241ec0 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -473,7 +473,6 @@ void rpcrdma_buffer_destroy(struct rpcrdma_buffer *); struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt); struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt); -void rpcrdma_mr_put(struct rpcrdma_mr *mr); void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt); struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *); -- cgit v1.2.3-58-ga151 From 7a03aeb66c410366acc5439ae2a341f110c4f845 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:40:19 -0500 Subject: xprtrdma: Micro-optimize MR DMA-unmapping Now that rpcrdma_ep is no longer part of rpcrdma_xprt, there are four or five serial address dereferences needed to get to the IB device needed for DMA unmapping. Instead, let's use the same pattern that regbufs use: cache a pointer to the device in the MR, and use that as the indication that unmapping is necessary. This also guarantees that the exact same device is used for DMA mapping and unmapping, even if the r_xprt's ep has been replaced. I don't think this can happen today, but future changes might break this assumption. 
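[Editor's illustration, not part of the patch: a minimal sketch of the caching pattern described above, using hypothetical userspace types rather than the kernel's. The object records which device was used to map it, and a NULL device pointer means there is nothing to unmap; the actual patch does this by storing the ib_device pointer in mr->mr_device and checking it in frwr_mr_unmap().]

#include <stdio.h>

struct toy_device {
	const char *name;
};

struct toy_mr {
	struct toy_device *device;	/* NULL means "not mapped" */
	int nents;
};

static void toy_mr_map(struct toy_mr *mr, struct toy_device *dev, int nents)
{
	/* ... DMA-map the scatterlist with @dev here ... */
	mr->nents = nents;
	mr->device = dev;		/* remember the mapping device */
}

static void toy_mr_unmap(struct toy_mr *mr)
{
	if (!mr->device)
		return;			/* nothing to undo */
	/* ... unmap with mr->device, the exact device used to map ... */
	printf("unmapped %d entries on %s\n", mr->nents, mr->device->name);
	mr->device = NULL;
}

int main(void)
{
	struct toy_device dev = { .name = "mlx5_0" };	/* made-up name */
	struct toy_mr mr = { .device = NULL, .nents = 0 };

	toy_mr_map(&mr, &dev, 4);
	toy_mr_unmap(&mr);
	toy_mr_unmap(&mr);		/* second call is a harmless no-op */
	return 0;
}
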
Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- net/sunrpc/xprtrdma/frwr_ops.c | 12 ++++++------ net/sunrpc/xprtrdma/xprt_rdma.h | 1 + 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index e93b3457b958..baca49fe83af 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -67,11 +67,11 @@ void frwr_release_mr(struct rpcrdma_mr *mr) static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) { - if (mr->mr_dir != DMA_NONE) { + if (mr->mr_device) { trace_xprtrdma_mr_unmap(mr); - ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device, - mr->mr_sg, mr->mr_nents, mr->mr_dir); - mr->mr_dir = DMA_NONE; + ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents, + mr->mr_dir); + mr->mr_device = NULL; } } @@ -145,7 +145,7 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) mr->mr_xprt = r_xprt; mr->frwr.fr_mr = frmr; - mr->mr_dir = DMA_NONE; + mr->mr_device = NULL; INIT_LIST_HEAD(&mr->mr_list); init_completion(&mr->frwr.fr_linv_done); @@ -330,6 +330,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, mr->mr_dir); if (!dma_nents) goto out_dmamap_err; + mr->mr_device = ep->re_id->device; ibmr = mr->frwr.fr_mr; n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE); @@ -356,7 +357,6 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, return seg; out_dmamap_err: - mr->mr_dir = DMA_NONE; trace_xprtrdma_frwr_sgerr(mr, i); return ERR_PTR(-EIO); diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 6a45bf241ec0..94b28657aeeb 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -243,6 +243,7 @@ struct rpcrdma_req; struct rpcrdma_mr { struct list_head mr_list; struct rpcrdma_req *mr_req; + struct ib_device *mr_device; struct scatterlist *mr_sg; int mr_nents; enum dma_data_direction mr_dir; -- cgit v1.2.3-58-ga151 From 0359af7ac318495432e5a06f671c80dc29274f18 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 30 Nov 2020 14:58:19 -0500 Subject: SUNRPC: Remove XDRBUF_SPARSE_PAGES flag in gss_proxy upcall There's no need to defer allocation of pages for the receive buffer. 
- This upcall is quite infrequent - gssp_alloc_receive_pages() can allocate the pages with GFP_KERNEL, unlike the transport - gssp_alloc_receive_pages() knows exactly how many pages are needed Signed-off-by: Chuck Lever Reviewed-by: Olga Kornievskaia Signed-off-by: Anna Schumaker --- net/sunrpc/auth_gss/gss_rpc_upcall.c | 15 ++++++++++----- net/sunrpc/auth_gss/gss_rpc_xdr.c | 1 - 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c index af9c7f43859c..d1c003a25b0f 100644 --- a/net/sunrpc/auth_gss/gss_rpc_upcall.c +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c @@ -200,7 +200,7 @@ static int gssp_call(struct net *net, struct rpc_message *msg) static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg) { - int i; + unsigned int i; for (i = 0; i < arg->npages && arg->pages[i]; i++) __free_page(arg->pages[i]); @@ -210,14 +210,19 @@ static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg) static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg) { + unsigned int i; + arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE); arg->pages = kcalloc(arg->npages, sizeof(struct page *), GFP_KERNEL); - /* - * XXX: actual pages are allocated by xdr layer in - * xdr_partial_copy_from_skb. - */ if (!arg->pages) return -ENOMEM; + for (i = 0; i < arg->npages; i++) { + arg->pages[i] = alloc_page(GFP_KERNEL); + if (!arg->pages[i]) { + gssp_free_receive_pages(arg); + return -ENOMEM; + } + } return 0; } diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index 2ff7b7083eba..44838f6ea25e 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c @@ -771,7 +771,6 @@ void gssx_enc_accept_sec_context(struct rpc_rqst *req, xdr_inline_pages(&req->rq_rcv_buf, PAGE_SIZE/2 /* pretty arbitrary */, arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE); - req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; done: if (err) dprintk("RPC: gssx_enc_accept_sec_context: %d\n", err); -- cgit v1.2.3-58-ga151 From e4c72201b6ec3173dfe13fa2e2335a3ad78d4921 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 22 Oct 2020 17:40:33 -0400 Subject: SUNRPC: rpc_wake_up() should wake up tasks in the correct order Currently, we wake up the tasks by priority queue ordering, which means that we ignore the batching that is supposed to help with QoS issues. 
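[Editor's illustration, not part of the patch: a toy userspace model of the structural change. "Wake up all" now loops over the queue's existing "find next task" helper instead of draining the raw priority lists directly, so whatever ordering and batching policy that helper implements is applied consistently. The batching logic itself is elided here; only the loop structure is modeled.]

#include <stdio.h>

#define TOY_NR_PRIO 3

struct toy_wait_queue {
	int pending[TOY_NR_PRIO];	/* tasks waiting at each priority */
};

/* stand-in for __rpc_find_next_queued(): apply the queue's policy */
static int toy_find_next_queued(const struct toy_wait_queue *q)
{
	for (int prio = TOY_NR_PRIO - 1; prio >= 0; prio--)
		if (q->pending[prio] > 0)
			return prio;
	return -1;			/* queue is empty */
}

/* wake everyone, but always through the selection helper */
static void toy_wake_up_all(struct toy_wait_queue *q)
{
	for (;;) {
		int prio = toy_find_next_queued(q);

		if (prio < 0)
			break;
		q->pending[prio]--;
		printf("woke a task at priority %d\n", prio);
	}
}

int main(void)
{
	struct toy_wait_queue q = { .pending = { 2, 0, 1 } };

	toy_wake_up_all(&q);
	return 0;
}
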
Fixes: c049f8ea9a0d ("SUNRPC: Remove the bh-safe lock requirement on the rpc_wait_queue->lock") Signed-off-by: Trond Myklebust --- net/sunrpc/sched.c | 65 +++++++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index f06d7c315017..cf702a5f7fe5 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -675,6 +675,23 @@ struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue) } EXPORT_SYMBOL_GPL(rpc_wake_up_next); +/** + * rpc_wake_up_locked - wake up all rpc_tasks + * @queue: rpc_wait_queue on which the tasks are sleeping + * + */ +static void rpc_wake_up_locked(struct rpc_wait_queue *queue) +{ + struct rpc_task *task; + + for (;;) { + task = __rpc_find_next_queued(queue); + if (task == NULL) + break; + rpc_wake_up_task_queue_locked(queue, task); + } +} + /** * rpc_wake_up - wake up all rpc_tasks * @queue: rpc_wait_queue on which the tasks are sleeping @@ -683,25 +700,28 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next); */ void rpc_wake_up(struct rpc_wait_queue *queue) { - struct list_head *head; - spin_lock(&queue->lock); - head = &queue->tasks[queue->maxpriority]; + rpc_wake_up_locked(queue); + spin_unlock(&queue->lock); +} +EXPORT_SYMBOL_GPL(rpc_wake_up); + +/** + * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value. + * @queue: rpc_wait_queue on which the tasks are sleeping + * @status: status value to set + */ +static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status) +{ + struct rpc_task *task; + for (;;) { - while (!list_empty(head)) { - struct rpc_task *task; - task = list_first_entry(head, - struct rpc_task, - u.tk_wait.list); - rpc_wake_up_task_queue_locked(queue, task); - } - if (head == &queue->tasks[0]) + task = __rpc_find_next_queued(queue); + if (task == NULL) break; - head--; + rpc_wake_up_task_queue_set_status_locked(queue, task, status); } - spin_unlock(&queue->lock); } -EXPORT_SYMBOL_GPL(rpc_wake_up); /** * rpc_wake_up_status - wake up all rpc_tasks and set their status value. @@ -712,23 +732,8 @@ EXPORT_SYMBOL_GPL(rpc_wake_up); */ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) { - struct list_head *head; - spin_lock(&queue->lock); - head = &queue->tasks[queue->maxpriority]; - for (;;) { - while (!list_empty(head)) { - struct rpc_task *task; - task = list_first_entry(head, - struct rpc_task, - u.tk_wait.list); - task->tk_status = status; - rpc_wake_up_task_queue_locked(queue, task); - } - if (head == &queue->tasks[0]) - break; - head--; - } + rpc_wake_up_status_locked(queue, status); spin_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(rpc_wake_up_status); -- cgit v1.2.3-58-ga151 From d5aa6b22e2258f05317313ecc02efbb988ed6d38 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 6 Nov 2020 16:33:38 -0500 Subject: SUNRPC: xprt_load_transport() needs to support the netid "rdma6" According to RFC5666, the correct netid for an IPv6 addressed RDMA transport is "rdma6", which we've supported as a mount option since Linux-4.7. The problem is when we try to load the module "xprtrdma6", that will fail, since there is no modulealias of that name. 
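[Editor's illustration, not part of the patch: a standalone userspace sketch of the lookup this patch introduces, with simplified types and table contents. Each transport class advertises the netids it handles in an empty-string-terminated array, and the lookup walks those arrays, so "rdma6" resolves to the same class as "rdma". In the kernel patch, the fallback when nothing matches is request_module("rpc%s", netid), which is why the RPC/RDMA module also gains MODULE_ALIAS("rpcrdma6").]

#include <stdio.h>
#include <string.h>

struct toy_xprt_class {
	const char *name;
	const char *netid[4];		/* "" terminates the list */
};

static const struct toy_xprt_class toy_classes[] = {
	{ .name = "xprtrdma", .netid = { "rdma", "rdma6", "" } },
	{ .name = "xprtsock-tcp", .netid = { "tcp", "tcp6", "" } },
};

static const struct toy_xprt_class *toy_find_by_netid(const char *netid)
{
	size_t n = sizeof(toy_classes) / sizeof(toy_classes[0]);

	for (size_t i = 0; i < n; i++)
		for (size_t j = 0; toy_classes[i].netid[j][0] != '\0'; j++)
			if (strcmp(toy_classes[i].netid[j], netid) == 0)
				return &toy_classes[i];
	return NULL;	/* kernel would try request_module("rpc%s", netid) */
}

int main(void)
{
	const struct toy_xprt_class *t = toy_find_by_netid("rdma6");

	printf("rdma6 -> %s\n", t ? t->name : "(no transport registered)");
	return 0;
}
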
Fixes: 181342c5ebe8 ("xprtrdma: Add rdma6 option to support NFS/RDMA IPv6") Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 1 + net/sunrpc/xprt.c | 65 +++++++++++++++++++++++++++++++---------- net/sunrpc/xprtrdma/module.c | 1 + net/sunrpc/xprtrdma/transport.c | 1 + net/sunrpc/xprtsock.c | 4 +++ 5 files changed, 56 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a603d48d2b2c..3ac5037d1c3d 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -330,6 +330,7 @@ struct xprt_class { struct rpc_xprt * (*setup)(struct xprt_create *); struct module *owner; char name[32]; + const char * netid[]; }; /* diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index f6c17e75f20e..57f09ea3ef2a 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -151,31 +151,64 @@ out: } EXPORT_SYMBOL_GPL(xprt_unregister_transport); +static void +xprt_class_release(const struct xprt_class *t) +{ + module_put(t->owner); +} + +static const struct xprt_class * +xprt_class_find_by_netid_locked(const char *netid) +{ + const struct xprt_class *t; + unsigned int i; + + list_for_each_entry(t, &xprt_list, list) { + for (i = 0; t->netid[i][0] != '\0'; i++) { + if (strcmp(t->netid[i], netid) != 0) + continue; + if (!try_module_get(t->owner)) + continue; + return t; + } + } + return NULL; +} + +static const struct xprt_class * +xprt_class_find_by_netid(const char *netid) +{ + const struct xprt_class *t; + + spin_lock(&xprt_list_lock); + t = xprt_class_find_by_netid_locked(netid); + if (!t) { + spin_unlock(&xprt_list_lock); + request_module("rpc%s", netid); + spin_lock(&xprt_list_lock); + t = xprt_class_find_by_netid_locked(netid); + } + spin_unlock(&xprt_list_lock); + return t; +} + /** * xprt_load_transport - load a transport implementation - * @transport_name: transport to load + * @netid: transport to load * * Returns: * 0: transport successfully loaded * -ENOENT: transport module not available */ -int xprt_load_transport(const char *transport_name) +int xprt_load_transport(const char *netid) { - struct xprt_class *t; - int result; + const struct xprt_class *t; - result = 0; - spin_lock(&xprt_list_lock); - list_for_each_entry(t, &xprt_list, list) { - if (strcmp(t->name, transport_name) == 0) { - spin_unlock(&xprt_list_lock); - goto out; - } - } - spin_unlock(&xprt_list_lock); - result = request_module("xprt%s", transport_name); -out: - return result; + t = xprt_class_find_by_netid(netid); + if (!t) + return -ENOENT; + xprt_class_release(t); + return 0; } EXPORT_SYMBOL_GPL(xprt_load_transport); diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c index 620327c01302..45c5b41ac8dc 100644 --- a/net/sunrpc/xprtrdma/module.c +++ b/net/sunrpc/xprtrdma/module.c @@ -24,6 +24,7 @@ MODULE_DESCRIPTION("RPC/RDMA Transport"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("svcrdma"); MODULE_ALIAS("xprtrdma"); +MODULE_ALIAS("rpcrdma6"); static void __exit rpc_rdma_cleanup(void) { diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 8915e42240d3..035060c05fd5 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -768,6 +768,7 @@ static struct xprt_class xprt_rdma = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_RDMA, .setup = xprt_setup_rdma, + .netid = { "rdma", "rdma6", "" }, }; void xprt_rdma_cleanup(void) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7090bbee0ec5..c93ff70da3f9 100644 --- a/net/sunrpc/xprtsock.c +++ 
b/net/sunrpc/xprtsock.c @@ -3059,6 +3059,7 @@ static struct xprt_class xs_local_transport = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_LOCAL, .setup = xs_setup_local, + .netid = { "" }, }; static struct xprt_class xs_udp_transport = { @@ -3067,6 +3068,7 @@ static struct xprt_class xs_udp_transport = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_UDP, .setup = xs_setup_udp, + .netid = { "udp", "udp6", "" }, }; static struct xprt_class xs_tcp_transport = { @@ -3075,6 +3077,7 @@ static struct xprt_class xs_tcp_transport = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_TCP, .setup = xs_setup_tcp, + .netid = { "tcp", "tcp6", "" }, }; static struct xprt_class xs_bc_tcp_transport = { @@ -3083,6 +3086,7 @@ static struct xprt_class xs_bc_tcp_transport = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_BC_TCP, .setup = xs_setup_bc_tcp, + .netid = { "" }, }; /** -- cgit v1.2.3-58-ga151 From 9bccd264611b5345d85138dc7fd55bdeb9e6942e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 10 Nov 2020 12:58:22 -0500 Subject: SUNRPC: Close a race with transport setup and module put After we've looked up the transport module, we need to ensure it can't go away until we've finished running the transport setup code. Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 44 +++++++++++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 57f09ea3ef2a..bf490d0c98c6 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -157,6 +157,32 @@ xprt_class_release(const struct xprt_class *t) module_put(t->owner); } +static const struct xprt_class * +xprt_class_find_by_ident_locked(int ident) +{ + const struct xprt_class *t; + + list_for_each_entry(t, &xprt_list, list) { + if (t->ident != ident) + continue; + if (!try_module_get(t->owner)) + continue; + return t; + } + return NULL; +} + +static const struct xprt_class * +xprt_class_find_by_ident(int ident) +{ + const struct xprt_class *t; + + spin_lock(&xprt_list_lock); + t = xprt_class_find_by_ident_locked(ident); + spin_unlock(&xprt_list_lock); + return t; +} + static const struct xprt_class * xprt_class_find_by_netid_locked(const char *netid) { @@ -1929,21 +1955,17 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) struct rpc_xprt *xprt_create_transport(struct xprt_create *args) { struct rpc_xprt *xprt; - struct xprt_class *t; + const struct xprt_class *t; - spin_lock(&xprt_list_lock); - list_for_each_entry(t, &xprt_list, list) { - if (t->ident == args->ident) { - spin_unlock(&xprt_list_lock); - goto found; - } + t = xprt_class_find_by_ident(args->ident); + if (!t) { + dprintk("RPC: transport (%d) not supported\n", args->ident); + return ERR_PTR(-EIO); } - spin_unlock(&xprt_list_lock); - dprintk("RPC: transport (%d) not supported\n", args->ident); - return ERR_PTR(-EIO); -found: xprt = t->setup(args); + xprt_class_release(t); + if (IS_ERR(xprt)) goto out; if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT) -- cgit v1.2.3-58-ga151 From 1fc5f13186440973e1aa1d85aa263326756af431 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 10 Nov 2020 09:41:21 -0500 Subject: SUNRPC: Add a helper to return the transport identifier given a netid Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 1 + net/sunrpc/xprt.c | 25 +++++++++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 3ac5037d1c3d..f7b75c72f80e 100644 --- 
a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -386,6 +386,7 @@ xprt_disable_swap(struct rpc_xprt *xprt) int xprt_register_transport(struct xprt_class *type); int xprt_unregister_transport(struct xprt_class *type); int xprt_load_transport(const char *); +int xprt_find_transport_ident(const char *); void xprt_wait_for_reply_request_def(struct rpc_task *task); void xprt_wait_for_reply_request_rtt(struct rpc_task *task); void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index bf490d0c98c6..23452f57d369 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -219,22 +219,39 @@ xprt_class_find_by_netid(const char *netid) } /** - * xprt_load_transport - load a transport implementation + * xprt_find_transport_ident - convert a netid into a transport identifier * @netid: transport to load * * Returns: - * 0: transport successfully loaded + * > 0: transport identifier * -ENOENT: transport module not available */ -int xprt_load_transport(const char *netid) +int xprt_find_transport_ident(const char *netid) { const struct xprt_class *t; + int ret; t = xprt_class_find_by_netid(netid); if (!t) return -ENOENT; + ret = t->ident; xprt_class_release(t); - return 0; + return ret; +} +EXPORT_SYMBOL_GPL(xprt_find_transport_ident); + +/** + * xprt_load_transport - load a transport implementation + * @netid: transport to load + * + * Returns: + * 0: transport successfully loaded + * -ENOENT: transport module not available + */ +int xprt_load_transport(const char *netid) +{ + int ret = xprt_find_transport_ident(netid); + return ret < 0 ? ret : 0; } EXPORT_SYMBOL_GPL(xprt_load_transport); -- cgit v1.2.3-58-ga151 From c87b056e58e71ba7a3f603700618f8da9742aa29 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 10 Nov 2020 10:32:14 -0500 Subject: SUNRPC: Remove unused function xprt_load_transport() Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 1 - net/sunrpc/xprt.c | 15 --------------- 2 files changed, 16 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index f7b75c72f80e..d2e97ee802af 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -385,7 +385,6 @@ xprt_disable_swap(struct rpc_xprt *xprt) */ int xprt_register_transport(struct xprt_class *type); int xprt_unregister_transport(struct xprt_class *type); -int xprt_load_transport(const char *); int xprt_find_transport_ident(const char *); void xprt_wait_for_reply_request_def(struct rpc_task *task); void xprt_wait_for_reply_request_rtt(struct rpc_task *task); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 23452f57d369..691ccf8049a4 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -240,21 +240,6 @@ int xprt_find_transport_ident(const char *netid) } EXPORT_SYMBOL_GPL(xprt_find_transport_ident); -/** - * xprt_load_transport - load a transport implementation - * @netid: transport to load - * - * Returns: - * 0: transport successfully loaded - * -ENOENT: transport module not available - */ -int xprt_load_transport(const char *netid) -{ - int ret = xprt_find_transport_ident(netid); - return ret < 0 ? 
ret : 0; -} -EXPORT_SYMBOL_GPL(xprt_load_transport); - static void xprt_clear_locked(struct rpc_xprt *xprt) { xprt->snd_task = NULL; -- cgit v1.2.3-58-ga151 From 4aceaaea5eccd32bc40c6c76b262489b2f53ca8d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 10 Nov 2020 10:56:53 -0500 Subject: SUNRPC: Fix up open coded kmemdup_nul() Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 71e03b930b70..b1cda6d85ded 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1942,10 +1942,8 @@ ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen); if (ret > 0) { - char *s = kmalloc(ret + 1, gfp_flags); + char *s = kmemdup_nul(p, ret, gfp_flags); if (s != NULL) { - memcpy(s, p, ret); - s[ret] = '\0'; *str = s; return strlen(s); } -- cgit v1.2.3-58-ga151 From 2b1f83d108bd35d12d8a833298d2a033f9121aac Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 21 Nov 2020 14:01:50 -0500 Subject: SUNRPC: Fix up typo in xdr_init_decode() We already know that the head buffer and page are empty, so if there is any data, it is in the tail. Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index b1cda6d85ded..bc7a622016ee 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1060,7 +1060,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, else if (buf->page_len != 0) xdr_set_page_base(xdr, 0, buf->len); else - xdr_set_iov(xdr, buf->head, buf->len); + xdr_set_iov(xdr, buf->tail, buf->len); if (p != NULL && p > xdr->p && xdr->end >= p) { xdr->nwords -= p - xdr->p; xdr->p = p; -- cgit v1.2.3-58-ga151 From 8d86e373b0ef52d091ced9583ffbb33ad2771576 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 21 Nov 2020 14:50:43 -0500 Subject: SUNRPC: Clean up helpers xdr_set_iov() and xdr_set_page_base() Allow xdr_set_iov() to set a base so that we can use it to set the cursor to a specific position in the kvec buffer. If the new base overflows the kvec/pages buffer in either xdr_set_iov() or xdr_set_page_base(), then truncate it so that we point to the end of the buffer. Finally, change both function to return the number of bytes remaining to read in their buffers. 
Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index bc7a622016ee..394297ec1cb9 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -970,19 +970,22 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b } EXPORT_SYMBOL_GPL(xdr_write_pages); -static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, - unsigned int len) +static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, + unsigned int base, unsigned int len) { if (len > iov->iov_len) len = iov->iov_len; - xdr->p = (__be32*)iov->iov_base; + if (unlikely(base > len)) + base = len; + xdr->p = (__be32*)(iov->iov_base + base); xdr->end = (__be32*)(iov->iov_base + len); xdr->iov = iov; xdr->page_ptr = NULL; + return len - base; } -static int xdr_set_page_base(struct xdr_stream *xdr, - unsigned int base, unsigned int len) +static unsigned int xdr_set_page_base(struct xdr_stream *xdr, + unsigned int base, unsigned int len) { unsigned int pgnr; unsigned int maxlen; @@ -991,9 +994,11 @@ static int xdr_set_page_base(struct xdr_stream *xdr, void *kaddr; maxlen = xdr->buf->page_len; - if (base >= maxlen) - return -EINVAL; - maxlen -= base; + if (base >= maxlen) { + base = maxlen; + maxlen = 0; + } else + maxlen -= base; if (len > maxlen) len = maxlen; @@ -1011,14 +1016,14 @@ static int xdr_set_page_base(struct xdr_stream *xdr, pgend = PAGE_SIZE; xdr->end = (__be32*)(kaddr + pgend); xdr->iov = NULL; - return 0; + return len; } static void xdr_set_page(struct xdr_stream *xdr, unsigned int base, unsigned int len) { - if (xdr_set_page_base(xdr, base, len) < 0) - xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2); + if (xdr_set_page_base(xdr, base, len) == 0) + xdr_set_iov(xdr, xdr->buf->tail, 0, xdr_stream_remaining(xdr)); } static void xdr_set_next_page(struct xdr_stream *xdr) @@ -1055,12 +1060,9 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, xdr->scratch.iov_base = NULL; xdr->scratch.iov_len = 0; xdr->nwords = XDR_QUADLEN(buf->len); - if (buf->head[0].iov_len != 0) - xdr_set_iov(xdr, buf->head, buf->len); - else if (buf->page_len != 0) - xdr_set_page_base(xdr, 0, buf->len); - else - xdr_set_iov(xdr, buf->tail, buf->len); + if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 && + xdr_set_page_base(xdr, 0, buf->len) == 0) + xdr_set_iov(xdr, buf->tail, 0, buf->len); if (p != NULL && p > xdr->p && xdr->end >= p) { xdr->nwords -= p - xdr->p; xdr->p = p; -- cgit v1.2.3-58-ga151 From 1d97316692f708de755655ac1cfd704d7a55843f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 20 Nov 2020 16:31:03 -0500 Subject: SUNRPC: Fix up xdr_read_pages() to take arbitrary object lengths Fix up xdr_read_pages() so that it can handle object lengths that are larger than the page length, by simply aligning to the next object in the buffer tail. The function will continue to return the length of the truncated object data that actually fit into the pages.
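For illustration only (not part of the patch): a user-space model of the offset arithmetic described above, with made-up lengths. It mirrors only the base/length computation, not the xdr_stream bookkeeping.

/* Illustration only: an object of @len bytes is rounded up to XDR words;
 * whatever does not fit in the page buffer (@pglen bytes) continues at
 * offset (XDR_QUADLEN(len) << 2) - pglen into the tail, and the function
 * reports only the part that landed in the pages.
 */
#include <stdio.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

static unsigned int read_pages(unsigned int len, unsigned int pglen,
			       unsigned int *tail_base)
{
	unsigned int nwords = XDR_QUADLEN(len);

	*tail_base = (nwords << 2) - pglen;	/* where the object resumes in the tail */
	return len <= pglen ? len : pglen;	/* bytes of the object held in the pages */
}

int main(void)
{
	unsigned int base;

	/* A 10-byte object with only 8 bytes of page space: 8 bytes stay in the
	 * pages, and decoding resumes 4 bytes into the tail (12 rounded - 8).
	 */
	printf("returned=%u tail_base=%u\n", read_pages(10, 8, &base), base);
	return 0;
}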
Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 41 +++++++++++++++-------------------------- 1 file changed, 15 insertions(+), 26 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 394297ec1cb9..3ce0a5daa9eb 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1219,44 +1219,33 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) } /** - * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position + * xdr_read_pages - align page-based XDR data to current pointer position * @xdr: pointer to xdr_stream struct * @len: number of bytes of page data * * Moves data beyond the current pointer position from the XDR head[] buffer - * into the page list. Any data that lies beyond current position + "len" - * bytes is moved into the XDR tail[]. + * into the page list. Any data that lies beyond current position + @len + * bytes is moved into the XDR tail[]. The xdr_stream current position is + * then advanced past that data to align to the next XDR object in the tail. * * Returns the number of XDR encoded bytes now contained in the pages */ unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len) { - struct xdr_buf *buf = xdr->buf; - struct kvec *iov; - unsigned int nwords; - unsigned int end; - unsigned int padding; + unsigned int nwords = XDR_QUADLEN(len); + unsigned int base, end, pglen; - len = xdr_align_pages(xdr, len); - if (len == 0) + pglen = xdr_align_pages(xdr, nwords << 2); + if (pglen == 0) return 0; - nwords = XDR_QUADLEN(len); - padding = (nwords << 2) - len; - xdr->iov = iov = buf->tail; - /* Compute remaining message length. */ - end = ((xdr->nwords - nwords) << 2) + padding; - if (end > iov->iov_len) - end = iov->iov_len; - /* - * Position current pointer at beginning of tail, and - * set remaining message length. - */ - xdr->p = (__be32 *)((char *)iov->iov_base + padding); - xdr->end = (__be32 *)((char *)iov->iov_base + end); - xdr->page_ptr = NULL; - xdr->nwords = XDR_QUADLEN(end - padding); - return len; + xdr->nwords -= nwords; + base = (nwords << 2) - pglen; + end = xdr_stream_remaining(xdr) - pglen; + + if (xdr_set_iov(xdr, xdr->buf->tail, base, end) == 0) + xdr->nwords = 0; + return len <= pglen ? len : pglen; } EXPORT_SYMBOL_GPL(xdr_read_pages); -- cgit v1.2.3-58-ga151 From 9ed5af268e88f6e5b65376be98d652b37cb20d7b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 21 Nov 2020 20:46:18 -0500 Subject: SUNRPC: Clean up the handling of page padding in rpc_prepare_reply_pages() rpc_prepare_reply_pages() currently expects the 'hdrsize' argument to contain the length of the data that we expect to be placed in the head kvec, plus one word of padding that is placed after the page data. This is very confusing when reading the code, and sometimes leads to callers adding an arbitrary value of '1' just to satisfy the requirement (whether or not the page data actually needs such padding). This patch clarifies the code by removing that one word of padding from the 'hdrsize' argument, which means the padding must be subtracted in all the existing callers.
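For illustration only (not part of the patch): a sketch, with made-up word counts, showing that the old and new calling conventions reserve the same amount of head space. The RPC_REPHDRSIZE and au_ralign values here are assumptions for the example, not the real constants.

/* Illustration only: previously 'hdrsize' included one pad word and the
 * helper subtracted 1; now callers pass the size without the pad and the
 * helper subtracts nothing. Either way the head reservation is identical.
 */
#include <assert.h>
#include <stdio.h>

#define RPC_REPHDRSIZE	3	/* assumption for the example */
#define AU_RALIGN	2	/* assumption for the example */
#define PAGEPAD_SZ	1	/* one 32-bit XDR word of page padding */
#define READRES_SZ	(1 + 3 + PAGEPAD_SZ)	/* decode words incl. page pad */

int main(void)
{
	unsigned int old_words = READRES_SZ + RPC_REPHDRSIZE + AU_RALIGN - 1;
	unsigned int new_words = (READRES_SZ - PAGEPAD_SZ) + RPC_REPHDRSIZE + AU_RALIGN;

	assert(old_words == new_words);		/* same head reservation either way */
	printf("head kvec bytes reserved: %u\n", new_words << 2);
	return 0;
}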
Fixes: 02ef04e432ba ("NFS: Account for XDR pad of buf->pages") Signed-off-by: Trond Myklebust --- fs/nfs/nfs2xdr.c | 19 ++++++++++--------- fs/nfs/nfs3xdr.c | 29 ++++++++++++++++------------- fs/nfs/nfs4xdr.c | 36 +++++++++++++++++++----------------- net/sunrpc/clnt.c | 5 +---- net/sunrpc/xdr.c | 3 --- 5 files changed, 46 insertions(+), 46 deletions(-) (limited to 'net') diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index f6676af37d5d..7fba7711e6b3 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -34,6 +34,7 @@ * Declare the space requirements for NFS arguments and replies as * number of 32bit-words */ +#define NFS_pagepad_sz (1) /* Page padding */ #define NFS_fhandle_sz (8) #define NFS_sattr_sz (8) #define NFS_filename_sz (1+(NFS2_MAXNAMLEN>>2)) @@ -56,11 +57,11 @@ #define NFS_attrstat_sz (1+NFS_fattr_sz) #define NFS_diropres_sz (1+NFS_fhandle_sz+NFS_fattr_sz) -#define NFS_readlinkres_sz (2+1) -#define NFS_readres_sz (1+NFS_fattr_sz+1+1) +#define NFS_readlinkres_sz (2+NFS_pagepad_sz) +#define NFS_readres_sz (1+NFS_fattr_sz+1+NFS_pagepad_sz) #define NFS_writeres_sz (NFS_attrstat_sz) #define NFS_stat_sz (1) -#define NFS_readdirres_sz (1+1) +#define NFS_readdirres_sz (1+NFS_pagepad_sz) #define NFS_statfsres_sz (1+NFS_info_sz) static int nfs_stat_to_errno(enum nfs_stat); @@ -592,8 +593,8 @@ static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req, const struct nfs_readlinkargs *args = data; encode_fhandle(xdr, args->fh); - rpc_prepare_reply_pages(req, args->pages, args->pgbase, - args->pglen, NFS_readlinkres_sz); + rpc_prepare_reply_pages(req, args->pages, args->pgbase, args->pglen, + NFS_readlinkres_sz - NFS_pagepad_sz); } /* @@ -628,8 +629,8 @@ static void nfs2_xdr_enc_readargs(struct rpc_rqst *req, const struct nfs_pgio_args *args = data; encode_readargs(xdr, args); - rpc_prepare_reply_pages(req, args->pages, args->pgbase, - args->count, NFS_readres_sz); + rpc_prepare_reply_pages(req, args->pages, args->pgbase, args->count, + NFS_readres_sz - NFS_pagepad_sz); req->rq_rcv_buf.flags |= XDRBUF_READ; } @@ -786,8 +787,8 @@ static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req, const struct nfs_readdirargs *args = data; encode_readdirargs(xdr, args); - rpc_prepare_reply_pages(req, args->pages, 0, - args->count, NFS_readdirres_sz); + rpc_prepare_reply_pages(req, args->pages, 0, args->count, + NFS_readdirres_sz - NFS_pagepad_sz); } /* diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 69971f6c840d..ca10072644ff 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -33,6 +33,7 @@ * Declare the space requirements for NFS arguments and replies as * number of 32bit-words */ +#define NFS3_pagepad_sz (1) /* Page padding */ #define NFS3_fhandle_sz (1+16) #define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */ #define NFS3_sattr_sz (15) @@ -69,13 +70,13 @@ #define NFS3_removeres_sz (NFS3_setattrres_sz) #define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz)) #define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1) -#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+1) -#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+1) +#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+NFS3_pagepad_sz) +#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+NFS3_pagepad_sz) #define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4) #define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) #define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz)) #define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) -#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+1) +#define 
NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+NFS3_pagepad_sz) #define NFS3_fsstatres_sz (1+NFS3_post_op_attr_sz+13) #define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12) #define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6) @@ -85,7 +86,8 @@ #define ACL3_setaclargs_sz (NFS3_fh_sz+1+ \ XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE)) #define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+ \ - XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE)+1) + XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE)+\ + NFS3_pagepad_sz) #define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz) static int nfs3_stat_to_errno(enum nfs_stat); @@ -909,8 +911,8 @@ static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req, const struct nfs3_readlinkargs *args = data; encode_nfs_fh3(xdr, args->fh); - rpc_prepare_reply_pages(req, args->pages, args->pgbase, - args->pglen, NFS3_readlinkres_sz); + rpc_prepare_reply_pages(req, args->pages, args->pgbase, args->pglen, + NFS3_readlinkres_sz - NFS3_pagepad_sz); } /* @@ -939,7 +941,8 @@ static void nfs3_xdr_enc_read3args(struct rpc_rqst *req, const void *data) { const struct nfs_pgio_args *args = data; - unsigned int replen = args->replen ? args->replen : NFS3_readres_sz; + unsigned int replen = args->replen ? args->replen : + NFS3_readres_sz - NFS3_pagepad_sz; encode_read3args(xdr, args); rpc_prepare_reply_pages(req, args->pages, args->pgbase, @@ -1239,8 +1242,8 @@ static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req, const struct nfs3_readdirargs *args = data; encode_readdir3args(xdr, args); - rpc_prepare_reply_pages(req, args->pages, 0, - args->count, NFS3_readdirres_sz); + rpc_prepare_reply_pages(req, args->pages, 0, args->count, + NFS3_readdirres_sz - NFS3_pagepad_sz); } /* @@ -1281,8 +1284,8 @@ static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req, const struct nfs3_readdirargs *args = data; encode_readdirplus3args(xdr, args); - rpc_prepare_reply_pages(req, args->pages, 0, - args->count, NFS3_readdirres_sz); + rpc_prepare_reply_pages(req, args->pages, 0, args->count, + NFS3_readdirres_sz - NFS3_pagepad_sz); } /* @@ -1328,7 +1331,7 @@ static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req, if (args->mask & (NFS_ACL | NFS_DFACL)) { rpc_prepare_reply_pages(req, args->pages, 0, NFSACL_MAXPAGES << PAGE_SHIFT, - ACL3_getaclres_sz); + ACL3_getaclres_sz - NFS3_pagepad_sz); req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; } } @@ -1648,7 +1651,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr, result->op_status = status; if (status != NFS3_OK) goto out_status; - result->replen = 4 + ((xdr_stream_pos(xdr) - pos) >> 2); + result->replen = 3 + ((xdr_stream_pos(xdr) - pos) >> 2); error = decode_read3resok(xdr, result); out: return error; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c16b93df1bc1..3899ef3047f4 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -84,6 +84,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, /* lock,open owner id: * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) */ +#define pagepad_maxsz (1) #define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2) #define lock_owner_id_maxsz (1 + 1 + 4) #define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) @@ -215,14 +216,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, nfs4_fattr_bitmap_maxsz) #define encode_read_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 3) -#define decode_read_maxsz (op_decode_hdr_maxsz + 2 + 1) +#define decode_read_maxsz (op_decode_hdr_maxsz + 2 + pagepad_maxsz) #define encode_readdir_maxsz 
(op_encode_hdr_maxsz + \ 2 + encode_verifier_maxsz + 5 + \ nfs4_label_maxsz) #define decode_readdir_maxsz (op_decode_hdr_maxsz + \ - decode_verifier_maxsz + 1) + decode_verifier_maxsz + pagepad_maxsz) #define encode_readlink_maxsz (op_encode_hdr_maxsz) -#define decode_readlink_maxsz (op_decode_hdr_maxsz + 1 + 1) +#define decode_readlink_maxsz (op_decode_hdr_maxsz + 1 + pagepad_maxsz) #define encode_write_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 4) #define decode_write_maxsz (op_decode_hdr_maxsz + \ @@ -284,14 +285,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #define decode_delegreturn_maxsz (op_decode_hdr_maxsz) #define encode_getacl_maxsz (encode_getattr_maxsz) #define decode_getacl_maxsz (op_decode_hdr_maxsz + \ - nfs4_fattr_bitmap_maxsz + 1 + 1) + nfs4_fattr_bitmap_maxsz + 1 + pagepad_maxsz) #define encode_setacl_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 3) #define decode_setacl_maxsz (decode_setattr_maxsz) #define encode_fs_locations_maxsz \ (encode_getattr_maxsz) #define decode_fs_locations_maxsz \ - (1) + (pagepad_maxsz) #define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) #define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4)) @@ -393,12 +394,13 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, /* devaddr4 payload is read into page */ \ 1 /* notification bitmap length */ + \ 1 /* notification bitmap, word 0 */ + \ - 1 /* possible XDR padding */) + pagepad_maxsz /* possible XDR padding */) #define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \ encode_stateid_maxsz) #define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \ decode_stateid_maxsz + \ - XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE) + 1) + XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE) + \ + pagepad_maxsz) #define encode_layoutcommit_maxsz (op_encode_hdr_maxsz + \ 2 /* offset */ + \ 2 /* length */ + \ @@ -2342,7 +2344,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr, encode_layoutget(xdr, args->lg_args, &hdr); rpc_prepare_reply_pages(req, args->lg_args->layout.pages, 0, args->lg_args->layout.pglen, - hdr.replen); + hdr.replen - pagepad_maxsz); } encode_nops(&hdr); } @@ -2388,7 +2390,7 @@ static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, encode_layoutget(xdr, args->lg_args, &hdr); rpc_prepare_reply_pages(req, args->lg_args->layout.pages, 0, args->lg_args->layout.pglen, - hdr.replen); + hdr.replen - pagepad_maxsz); } encode_nops(&hdr); } @@ -2499,7 +2501,7 @@ static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr, encode_readlink(xdr, args, req, &hdr); rpc_prepare_reply_pages(req, args->pages, args->pgbase, - args->pglen, hdr.replen); + args->pglen, hdr.replen - pagepad_maxsz); encode_nops(&hdr); } @@ -2520,7 +2522,7 @@ static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr, encode_readdir(xdr, args, req, &hdr); rpc_prepare_reply_pages(req, args->pages, args->pgbase, - args->count, hdr.replen); + args->count, hdr.replen - pagepad_maxsz); encode_nops(&hdr); } @@ -2541,7 +2543,7 @@ static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr, encode_read(xdr, args, &hdr); rpc_prepare_reply_pages(req, args->pages, args->pgbase, - args->count, hdr.replen); + args->count, hdr.replen - pagepad_maxsz); req->rq_rcv_buf.flags |= XDRBUF_READ; encode_nops(&hdr); } @@ -2588,7 +2590,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, ARRAY_SIZE(nfs4_acl_bitmap), &hdr); 
rpc_prepare_reply_pages(req, args->acl_pages, 0, - args->acl_len, replen + 1); + args->acl_len, replen); encode_nops(&hdr); } @@ -2810,7 +2812,7 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, } rpc_prepare_reply_pages(req, (struct page **)&args->page, 0, - PAGE_SIZE, replen + 1); + PAGE_SIZE, replen); encode_nops(&hdr); } @@ -3014,14 +3016,14 @@ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); - replen = hdr.replen + op_decode_hdr_maxsz; + replen = hdr.replen + op_decode_hdr_maxsz + 2; encode_getdeviceinfo(xdr, args, &hdr); /* set up reply kvec. device_addr4 opaque data is read into the * pages */ rpc_prepare_reply_pages(req, args->pdev->pages, args->pdev->pgbase, - args->pdev->pglen, replen + 2 + 1); + args->pdev->pglen, replen); encode_nops(&hdr); } @@ -3043,7 +3045,7 @@ static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req, encode_layoutget(xdr, args, &hdr); rpc_prepare_reply_pages(req, args->layout.pages, 0, - args->layout.pglen, hdr.replen); + args->layout.pglen, hdr.replen - pagepad_maxsz); encode_nops(&hdr); } diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 3259120462ed..612f0a641f4c 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1251,10 +1251,7 @@ void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, unsigned int base, unsigned int len, unsigned int hdrsize) { - /* Subtract one to force an extra word of buffer space for the - * payload's XDR pad to fall into the rcv_buf's tail iovec. - */ - hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1; + hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign; xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len); trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 3ce0a5daa9eb..5a450055469f 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -193,9 +193,6 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, tail->iov_base = buf + offset; tail->iov_len = buflen - offset; - if ((xdr->page_len & 3) == 0) - tail->iov_len -= sizeof(__be32); - xdr->buflen += len; } EXPORT_SYMBOL_GPL(xdr_inline_pages); -- cgit v1.2.3-58-ga151 From 0279024f22705128c7139bd55af6981afe90e876 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 21 Nov 2020 21:21:11 -0500 Subject: SUNRPC: Fix up xdr_set_page() While we always want to align to the next page and/or the beginning of the tail buffer when we call xdr_set_next_page(), the functions xdr_align_data() and xdr_expand_hole() really want to align to the next object in that next page or tail. 
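For illustration only (not part of the patch): a user-space sketch of the offset carry described above; the region names and lengths are invented for the example.

/* Illustration only: if the requested base lands beyond the page data, the
 * leftover is used as an offset into the tail kvec (the "next object")
 * rather than being discarded.
 */
#include <stdio.h>

struct cursor {
	const char *region;	/* "pages" or "tail" */
	unsigned int offset;	/* offset within that region */
};

static struct cursor set_page(unsigned int page_len, unsigned int tail_len,
			      unsigned int base)
{
	struct cursor c;

	if (base < page_len) {
		c.region = "pages";
		c.offset = base;
	} else {
		c.region = "tail";
		c.offset = base - page_len;	/* carry the overflow into the tail */
		if (c.offset > tail_len)
			c.offset = tail_len;	/* clamp: nothing left to read */
	}
	return c;
}

int main(void)
{
	struct cursor c = set_page(4096, 512, 4100);

	printf("%s+%u\n", c.region, c.offset);	/* tail+4: the next object, not tail+0 */
	return 0;
}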
Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 5a450055469f..ddd5cc2281ab 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1019,8 +1019,10 @@ static unsigned int xdr_set_page_base(struct xdr_stream *xdr, static void xdr_set_page(struct xdr_stream *xdr, unsigned int base, unsigned int len) { - if (xdr_set_page_base(xdr, base, len) == 0) - xdr_set_iov(xdr, xdr->buf->tail, 0, xdr_stream_remaining(xdr)); + if (xdr_set_page_base(xdr, base, len) == 0) { + base -= xdr->buf->page_len; + xdr_set_iov(xdr, xdr->buf->tail, base, len); + } } static void xdr_set_next_page(struct xdr_stream *xdr) @@ -1029,17 +1031,18 @@ static void xdr_set_next_page(struct xdr_stream *xdr) newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT; newbase -= xdr->buf->page_base; - - xdr_set_page(xdr, newbase, PAGE_SIZE); + if (newbase < xdr->buf->page_len) + xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr)); + else + xdr_set_iov(xdr, xdr->buf->tail, 0, xdr_stream_remaining(xdr)); } static bool xdr_set_next_buffer(struct xdr_stream *xdr) { if (xdr->page_ptr != NULL) xdr_set_next_page(xdr); - else if (xdr->iov == xdr->buf->head) { - xdr_set_page(xdr, 0, PAGE_SIZE); - } + else if (xdr->iov == xdr->buf->head) + xdr_set_page(xdr, 0, xdr_stream_remaining(xdr)); return xdr->p != xdr->end; } @@ -1277,7 +1280,7 @@ uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length } xdr->nwords -= XDR_QUADLEN(length); - xdr_set_page(xdr, from + length, PAGE_SIZE); + xdr_set_page(xdr, from + length, xdr_stream_remaining(xdr)); return length; } EXPORT_SYMBOL_GPL(xdr_align_data); @@ -1314,7 +1317,7 @@ uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t lengt _zero_pages(buf->pages, buf->page_base + offset, length); buf->len += length - (from - offset) - truncated; - xdr_set_page(xdr, offset + length, PAGE_SIZE); + xdr_set_page(xdr, offset + length, xdr_stream_remaining(xdr)); return length; } EXPORT_SYMBOL_GPL(xdr_expand_hole); -- cgit v1.2.3-58-ga151 From eee1f54964fe868da425fe52a03666377335de01 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 21 Nov 2020 21:39:02 -0500 Subject: SUNRPC: Fix open coded xdr_stream_remaining() Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index ddd5cc2281ab..c852d199c789 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1261,7 +1261,7 @@ uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length xdr_realign_pages(xdr); from = xdr_page_pos(xdr); - bytes = xdr->nwords << 2; + bytes = xdr_stream_remaining(xdr); if (length < bytes) bytes = length; @@ -1298,7 +1298,7 @@ uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t lengt xdr_realign_pages(xdr); from = xdr_page_pos(xdr); - bytes = xdr->nwords << 2; + bytes = xdr_stream_remaining(xdr); if (offset + length + bytes > buf->page_len) { unsigned int shift = (offset + length + bytes) - buf->page_len; -- cgit v1.2.3-58-ga151 From 35a6d396721e28ba161595b0fc9e8896c00399bb Mon Sep 17 00:00:00 2001 From: Fedor Tokarev Date: Thu, 15 Oct 2020 16:59:08 +0300 Subject: net: sunrpc: Fix 'snprintf' return value check in 'do_xprt_debugfs' 'snprintf' returns the number of characters which would have been written if enough space had been available, excluding the terminating 
null byte. Thus, a return value equal to 'sizeof(buf)' means that the last character has been dropped. Signed-off-by: Fedor Tokarev Fixes: 2f34b8bfae19 ("SUNRPC: add links for all client xprts to debugfs") Signed-off-by: Trond Myklebust --- net/sunrpc/debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index fd9bca242724..56029e3af6ff 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c @@ -128,13 +128,13 @@ static int do_xprt_debugfs(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *n return 0; len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", xprt->debugfs->d_name.name); - if (len > sizeof(name)) + if (len >= sizeof(name)) return -1; if (*nump == 0) strcpy(link, "xprt"); else { len = snprintf(link, sizeof(link), "xprt%d", *nump); - if (len > sizeof(link)) + if (len >= sizeof(link)) return -1; } debugfs_create_symlink(link, clnt->cl_debugfs, name); -- cgit v1.2.3-58-ga151 From ac9645c87380e39a8fa87a1b51721efcdea89dbf Mon Sep 17 00:00:00 2001 From: Dan Aloni Date: Sat, 5 Dec 2020 11:28:35 +0200 Subject: sunrpc: fix xs_read_xdr_buf for partial pages receive When receiving page data, the return value 'ret', when positive, includes `buf->page_base`, so we should subtract that before using it to advance `offset` and comparing it against `want`. This was discovered in the very rare cases where the server returned a chunk of bytes that, when added to the number of page bytes already received, happened to match the current `recv.len`, for example in this case: buf->page_base : 258356 actually received from socket: 1740 ret : 260096 want : 260096 In this case neither of the two 'if ... goto out' checks triggers, and we continue on to tail parsing. It is worth mentioning that the ensuing EMSGSIZE from the continued execution of `xs_read_xdr_buf` may be observed by an application, due to 4 superfluous bytes being added to the page data. Fixes: 277e4ab7d530 ("SUNRPC: Simplify TCP receive code by switching to using iterators") Signed-off-by: Dan Aloni Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c93ff70da3f9..c56a66cdf4ac 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -433,7 +433,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, if (ret <= 0) goto sock_err; xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); - offset += ret - buf->page_base; + ret -= buf->page_base; + offset += ret; if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) goto out; if (ret != want) -- cgit v1.2.3-58-ga151 From 15261b9126cd5bb2ad8521da49d8f5c042d904c7 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 8 Dec 2020 18:29:02 -0500 Subject: xprtrdma: Fix XDRBUF_SPARSE_PAGES support Olga K. observed that rpcrdma_marshal_req() allocates sparse pages only when it has determined that a Reply chunk is necessary. There are plenty of cases where no Reply chunk is needed, but the XDRBUF_SPARSE_PAGES flag is set. The result would be a crash in rpcrdma_inline_fixup() when it tries to copy parts of the received Reply into a missing page. To avoid crashing, handle sparse page allocation up front. Until XATTR support was added, this issue did not appear often because the only SPARSE_PAGES consumer always expected a reply large enough to require a Reply chunk.
Reported-by: Olga Kornievskaia Signed-off-by: Chuck Lever Cc: Signed-off-by: Trond Myklebust --- net/sunrpc/xprtrdma/rpc_rdma.c | 40 +++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 0f5120c7668f..c48536f2121f 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -179,6 +179,31 @@ rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt, r_xprt->rx_ep->re_max_inline_recv; } +/* ACL likes to be lazy in allocating pages. For TCP, these + * pages can be allocated during receive processing. Not true + * for RDMA, which must always provision receive buffers + * up front. + */ +static noinline int +rpcrdma_alloc_sparse_pages(struct xdr_buf *buf) +{ + struct page **ppages; + int len; + + len = buf->page_len; + ppages = buf->pages + (buf->page_base >> PAGE_SHIFT); + while (len > 0) { + if (!*ppages) + *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN); + if (!*ppages) + return -ENOBUFS; + ppages++; + len -= PAGE_SIZE; + } + + return 0; +} + /* Split @vec on page boundaries into SGEs. FMR registers pages, not * a byte range. Other modes coalesce these SGEs into a single MR * when they can. @@ -233,15 +258,6 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT); page_base = offset_in_page(xdrbuf->page_base); while (len) { - /* ACL likes to be lazy in allocating pages - ACLs - * are small by default but can get huge. - */ - if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) { - if (!*ppages) - *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN); - if (!*ppages) - return -ENOBUFS; - } seg->mr_page = *ppages; seg->mr_offset = (char *)page_base; seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len); @@ -867,6 +883,12 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst) __be32 *p; int ret; + if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) { + ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf); + if (ret) + return ret; + } + rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0); xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf), rqst); -- cgit v1.2.3-58-ga151 From c54e959b36cbdb0cb2f2805e3e945dd83476a5c7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 7 Dec 2020 13:30:46 -0500 Subject: SUNRPC: _shift_data_left/right_pages should check the shift length Exit early if the shift is zero. 
Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index c852d199c789..5833329c132c 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -225,6 +225,9 @@ _shift_data_left_pages(struct page **pages, size_t pgto_base, BUG_ON(pgfrom_base <= pgto_base); + if (!len) + return; + pgto = pages + (pgto_base >> PAGE_SHIFT); pgfrom = pages + (pgfrom_base >> PAGE_SHIFT); @@ -307,6 +310,9 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base, BUG_ON(pgto_base <= pgfrom_base); + if (!len) + return; + pgto_base += len; pgfrom_base += len; @@ -405,6 +411,9 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) char *vto; size_t copy; + if (!len) + return; + pgto = pages + (pgbase >> PAGE_SHIFT); pgbase &= ~PAGE_MASK; @@ -449,6 +458,9 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) char *vfrom; size_t copy; + if (!len) + return; + pgfrom = pages + (pgbase >> PAGE_SHIFT); pgbase &= ~PAGE_MASK; -- cgit v1.2.3-58-ga151 From 9a20f6f4e6ba9713605fbf7e7426ca22f1181545 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 4 Dec 2020 15:16:46 -0500 Subject: SUNRPC: Fixes for xdr_align_data() The main use case right now for xdr_align_data() is to shift the page data to the left, and in practice shrink the total XDR data buffer. This patch ensures that we fix up the accounting for the buffer length as we shift that data around. Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xdr.h | 2 +- net/sunrpc/xdr.c | 174 ++++++++++++++++++++++++++++++++++----------- 2 files changed, 133 insertions(+), 43 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 9548d075e06d..2b4e44bb0654 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -252,7 +252,7 @@ extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); -extern uint64_t xdr_align_data(struct xdr_stream *, uint64_t, uint32_t); +extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, unsigned int length); extern uint64_t xdr_expand_hole(struct xdr_stream *, uint64_t, uint64_t); /** diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 5833329c132c..c474339ba9ac 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -266,26 +266,6 @@ _shift_data_left_pages(struct page **pages, size_t pgto_base, } while ((len -= copy) != 0); } -static void -_shift_data_left_tail(struct xdr_buf *buf, unsigned int pgto, size_t len) -{ - struct kvec *tail = buf->tail; - - if (len > tail->iov_len) - len = tail->iov_len; - - _copy_to_pages(buf->pages, - buf->page_base + pgto, - (char *)tail->iov_base, - len); - tail->iov_len -= len; - - if (tail->iov_len > 0) - memmove((char *)tail->iov_base, - tail->iov_base + len, - tail->iov_len); -} - /** * _shift_data_right_pages * @pages: vector of pages containing both the source and dest memory area. 
@@ -516,6 +496,109 @@ _zero_pages(struct page **pages, size_t pgbase, size_t len) } while ((len -= zero) != 0); } +static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base, + unsigned int len, unsigned int shift) +{ + const struct kvec *tail = buf->tail; + + if (base >= tail->iov_len) + return; + if (len > tail->iov_len - base) + len = tail->iov_len - base; + /* Shift data into head */ + if (shift > buf->page_len + base) { + const struct kvec *head = buf->head; + unsigned int hdto = + head->iov_len + buf->page_len + base - shift; + unsigned int hdlen = len; + + if (WARN_ONCE(shift > head->iov_len + buf->page_len + base, + "SUNRPC: Misaligned data.\n")) + return; + if (hdto + hdlen > head->iov_len) + hdlen = head->iov_len - hdto; + memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen); + base += hdlen; + len -= hdlen; + if (!len) + return; + } + /* Shift data into pages */ + if (shift > base) { + unsigned int pgto = buf->page_len + base - shift; + unsigned int pglen = len; + + if (pgto + pglen > buf->page_len) + pglen = buf->page_len - pgto; + _copy_to_pages(buf->pages, buf->page_base + pgto, + tail->iov_base + base, pglen); + base += pglen; + len -= pglen; + if (!len) + return; + } + memmove(tail->iov_base + base - shift, tail->iov_base + base, len); +} + +static void xdr_buf_pages_copy_left(const struct xdr_buf *buf, + unsigned int base, unsigned int len, + unsigned int shift) +{ + unsigned int pgto; + + if (base >= buf->page_len) + return; + if (len > buf->page_len - base) + len = buf->page_len - base; + /* Shift data into head */ + if (shift > base) { + const struct kvec *head = buf->head; + unsigned int hdto = head->iov_len + base - shift; + unsigned int hdlen = len; + + if (WARN_ONCE(shift > head->iov_len + base, + "SUNRPC: Misaligned data.\n")) + return; + if (hdto + hdlen > head->iov_len) + hdlen = head->iov_len - hdto; + _copy_from_pages(head->iov_base + hdto, buf->pages, + buf->page_base + base, hdlen); + base += hdlen; + len -= hdlen; + if (!len) + return; + } + pgto = base - shift; + _shift_data_left_pages(buf->pages, buf->page_base + pgto, + buf->page_base + base, len); +} + +static void xdr_buf_tail_shift_left(const struct xdr_buf *buf, + unsigned int base, unsigned int len, + unsigned int shift) +{ + if (!shift || !len) + return; + xdr_buf_tail_copy_left(buf, base, len, shift); +} + +static void xdr_buf_pages_shift_left(const struct xdr_buf *buf, + unsigned int base, unsigned int len, + unsigned int shift) +{ + if (!shift || !len) + return; + if (base >= buf->page_len) { + xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift); + return; + } + xdr_buf_pages_copy_left(buf, base, len, shift); + len += base; + if (len <= buf->page_len) + return; + xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift); +} + /** * xdr_shrink_bufhead * @buf: xdr_buf @@ -1261,38 +1344,45 @@ unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len) } EXPORT_SYMBOL_GPL(xdr_read_pages); -uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length) +unsigned int xdr_align_data(struct xdr_stream *xdr, unsigned int offset, + unsigned int length) { struct xdr_buf *buf = xdr->buf; - unsigned int from, bytes; - unsigned int shift = 0; - - if ((offset + length) < offset || - (offset + length) > buf->page_len) - length = buf->page_len - offset; + unsigned int from, bytes, len; + unsigned int shift; xdr_realign_pages(xdr); from = xdr_page_pos(xdr); - bytes = xdr_stream_remaining(xdr); - if (length < bytes) - bytes = length; + + if (from 
>= buf->page_len + buf->tail->iov_len) + return 0; + if (from + buf->head->iov_len >= buf->len) + return 0; + + len = buf->len - buf->head->iov_len; + + /* We only shift data left! */ + if (WARN_ONCE(from < offset, "SUNRPC: misaligned data src=%u dst=%u\n", + from, offset)) + return 0; + if (WARN_ONCE(offset > buf->page_len, + "SUNRPC: buffer overflow. offset=%u, page_len=%u\n", + offset, buf->page_len)) + return 0; /* Move page data to the left */ - if (from > offset) { - shift = min_t(unsigned int, bytes, buf->page_len - from); - _shift_data_left_pages(buf->pages, - buf->page_base + offset, - buf->page_base + from, - shift); - bytes -= shift; + shift = from - offset; + xdr_buf_pages_shift_left(buf, from, len, shift); + xdr->buf->len -= shift; + xdr->nwords -= XDR_QUADLEN(shift); - /* Move tail data into the pages, if necessary */ - if (bytes > 0) - _shift_data_left_tail(buf, offset + shift, bytes); - } + bytes = xdr_stream_remaining(xdr); + if (length > bytes) + length = bytes; + bytes -= length; xdr->nwords -= XDR_QUADLEN(length); - xdr_set_page(xdr, from + length, xdr_stream_remaining(xdr)); + xdr_set_page(xdr, offset + length, bytes); return length; } EXPORT_SYMBOL_GPL(xdr_align_data); -- cgit v1.2.3-58-ga151 From c4f2f591f02c392ea7de018d2733748bf4c7b5f5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 4 Dec 2020 17:15:09 -0500 Subject: SUNRPC: Fix xdr_expand_hole() We do want to try to grow the buffer if possible, but if that attempt fails, we still want to move the data and truncate the XDR message. Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xdr.h | 2 +- net/sunrpc/xdr.c | 274 +++++++++++++++++++++++++++++---------------- 2 files changed, 180 insertions(+), 96 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 2b4e44bb0654..178f499e2283 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -253,7 +253,7 @@ extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, unsigned int length); -extern uint64_t xdr_expand_hole(struct xdr_stream *, uint64_t, uint64_t); +extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length); /** * xdr_stream_remaining - Return the number of bytes remaining in the stream diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index c474339ba9ac..e0906ed24374 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -334,46 +334,6 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base, } while ((len -= copy) != 0); } -static unsigned int -_shift_data_right_tail(struct xdr_buf *buf, unsigned int pgfrom, size_t len) -{ - struct kvec *tail = buf->tail; - unsigned int tailbuf_len; - unsigned int result = 0; - size_t copy; - - tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len; - - /* Shift the tail first */ - if (tailbuf_len != 0) { - unsigned int free_space = tailbuf_len - tail->iov_len; - - if (len < free_space) - free_space = len; - if (len > free_space) - len = free_space; - - tail->iov_len += free_space; - copy = len; - - if (tail->iov_len > len) { - char *p = (char *)tail->iov_base + len; - memmove(p, tail->iov_base, tail->iov_len - free_space); - result += tail->iov_len - free_space; - } else - copy 
= tail->iov_len; - - /* Copy from the inlined pages into the tail */ - _copy_from_pages((char *)tail->iov_base, - buf->pages, - buf->page_base + pgfrom, - copy); - result += copy; - } - - return result; -} - /** * _copy_to_pages * @pages: array of pages @@ -464,18 +424,42 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) } EXPORT_SYMBOL_GPL(_copy_from_pages); +static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base, + unsigned int len) +{ + if (base >= iov->iov_len) + return; + if (len > iov->iov_len - base) + len = iov->iov_len - base; + memset(iov->iov_base + base, 0, len); +} + /** - * _zero_pages - * @pages: array of pages - * @pgbase: beginning page vector address + * xdr_buf_pages_zero + * @buf: xdr_buf + * @pgbase: beginning offset * @len: length */ -static void -_zero_pages(struct page **pages, size_t pgbase, size_t len) +static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase, + unsigned int len) { + struct page **pages = buf->pages; struct page **page; char *vpage; - size_t zero; + unsigned int zero; + + if (!len) + return; + if (pgbase >= buf->page_len) { + xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len); + return; + } + if (pgbase + len > buf->page_len) { + xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len); + len = buf->page_len - pgbase; + } + + pgbase += buf->page_base; page = pages + (pgbase >> PAGE_SHIFT); pgbase &= ~PAGE_MASK; @@ -496,6 +480,103 @@ _zero_pages(struct page **pages, size_t pgbase, size_t len) } while ((len -= zero) != 0); } +static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len) +{ + struct kvec *head = buf->head; + struct kvec *tail = buf->tail; + unsigned int sum = head->iov_len + buf->page_len + tail->iov_len; + unsigned int free_space; + + if (sum > buf->len) { + free_space = min_t(unsigned int, sum - buf->len, len); + buf->len += free_space; + len -= free_space; + if (!len) + return; + } + + if (buf->buflen > sum) { + /* Expand the tail buffer */ + free_space = min_t(unsigned int, buf->buflen - sum, len); + tail->iov_len += free_space; + buf->len += free_space; + } +} + +static void xdr_buf_tail_copy_right(const struct xdr_buf *buf, + unsigned int base, unsigned int len, + unsigned int shift) +{ + const struct kvec *tail = buf->tail; + unsigned int to = base + shift; + + if (to >= tail->iov_len) + return; + if (len + to > tail->iov_len) + len = tail->iov_len - to; + memmove(tail->iov_base + to, tail->iov_base + base, len); +} + +static void xdr_buf_pages_copy_right(const struct xdr_buf *buf, + unsigned int base, unsigned int len, + unsigned int shift) +{ + const struct kvec *tail = buf->tail; + unsigned int to = base + shift; + unsigned int pglen = 0; + unsigned int talen = 0, tato = 0; + + if (base >= buf->page_len) + return; + if (len > buf->page_len - base) + len = buf->page_len - base; + if (to >= buf->page_len) { + tato = to - buf->page_len; + if (tail->iov_len >= len + tato) + talen = len; + else if (tail->iov_len > tato) + talen = tail->iov_len - tato; + } else if (len + to >= buf->page_len) { + pglen = buf->page_len - to; + talen = len - pglen; + if (talen > tail->iov_len) + talen = tail->iov_len; + } else + pglen = len; + + _copy_from_pages(tail->iov_base + tato, buf->pages, + buf->page_base + base + pglen, talen); + _shift_data_right_pages(buf->pages, buf->page_base + to, + buf->page_base + base, pglen); +} + +static void xdr_buf_tail_shift_right(const struct xdr_buf *buf, + unsigned int base, unsigned int len, + unsigned int shift) +{ + const 
struct kvec *tail = buf->tail; + + if (base >= tail->iov_len || !shift || !len) + return; + xdr_buf_tail_copy_right(buf, base, len, shift); +} + +static void xdr_buf_pages_shift_right(const struct xdr_buf *buf, + unsigned int base, unsigned int len, + unsigned int shift) +{ + if (!shift || !len) + return; + if (base >= buf->page_len) { + xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift); + return; + } + if (base + len > buf->page_len) + xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len, + shift); + xdr_buf_pages_copy_right(buf, base, len, shift); +} + static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { @@ -685,30 +766,33 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len) } /** - * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes + * xdr_shrink_pagelen - shrinks buf->pages to @len bytes * @buf: xdr_buf - * @len: bytes to remove from buf->pages + * @len: new page buffer length * * The extra data is not lost, but is instead moved into buf->tail. * Returns the actual number of bytes moved. */ -static unsigned int -xdr_shrink_pagelen(struct xdr_buf *buf, size_t len) +static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len) { - unsigned int pglen = buf->page_len; - unsigned int result; - - if (len > buf->page_len) - len = buf-> page_len; - - result = _shift_data_right_tail(buf, pglen - len, len); - buf->page_len -= len; - buf->buflen -= len; - /* Have we truncated the message? */ - if (buf->len > buf->buflen) - buf->len = buf->buflen; + unsigned int shift, buflen = buf->len - buf->head->iov_len; - return result; + WARN_ON_ONCE(len > buf->page_len); + if (buf->head->iov_len >= buf->len || len > buflen) + buflen = len; + if (buf->page_len > buflen) { + buf->buflen -= buf->page_len - buflen; + buf->page_len = buflen; + } + if (len >= buf->page_len) + return 0; + shift = buf->page_len - len; + xdr_buf_try_expand(buf, shift); + xdr_buf_pages_shift_right(buf, len, buflen - len, shift); + buf->page_len = len; + buf->len -= shift; + buf->buflen -= shift; + return shift; } void @@ -728,6 +812,18 @@ unsigned int xdr_stream_pos(const struct xdr_stream *xdr) } EXPORT_SYMBOL_GPL(xdr_stream_pos); +static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos) +{ + unsigned int blen = xdr->buf->len; + + xdr->nwords = blen > pos ? 
XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0; +} + +static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos) +{ + xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len); +} + /** * xdr_page_pos - Return the current offset from the start of the xdr pages * @xdr: pointer to struct xdr_stream @@ -1291,7 +1387,7 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) struct xdr_buf *buf = xdr->buf; unsigned int nwords = XDR_QUADLEN(len); unsigned int cur = xdr_stream_pos(xdr); - unsigned int copied, offset; + unsigned int copied; if (xdr->nwords == 0) return 0; @@ -1305,9 +1401,8 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) len = buf->page_len; else if (nwords < xdr->nwords) { /* Truncate page data and move it into the tail */ - offset = buf->page_len - len; - copied = xdr_shrink_pagelen(buf, offset); - trace_rpc_xdr_alignment(xdr, offset, copied); + copied = xdr_shrink_pagelen(buf, len); + trace_rpc_xdr_alignment(xdr, len, copied); xdr->nwords = XDR_QUADLEN(buf->len - cur); } return len; @@ -1387,39 +1482,28 @@ unsigned int xdr_align_data(struct xdr_stream *xdr, unsigned int offset, } EXPORT_SYMBOL_GPL(xdr_align_data); -uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length) +unsigned int xdr_expand_hole(struct xdr_stream *xdr, unsigned int offset, + unsigned int length) { struct xdr_buf *buf = xdr->buf; - unsigned int bytes; - unsigned int from; - unsigned int truncated = 0; - - if ((offset + length) < offset || - (offset + length) > buf->page_len) - length = buf->page_len - offset; + unsigned int from, to, shift; xdr_realign_pages(xdr); from = xdr_page_pos(xdr); - bytes = xdr_stream_remaining(xdr); - - if (offset + length + bytes > buf->page_len) { - unsigned int shift = (offset + length + bytes) - buf->page_len; - unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift); - truncated = shift - res; - xdr->nwords -= XDR_QUADLEN(truncated); - bytes -= shift; - } - - /* Now move the page data over and zero pages */ - if (bytes > 0) - _shift_data_right_pages(buf->pages, - buf->page_base + offset + length, - buf->page_base + from, - bytes); - _zero_pages(buf->pages, buf->page_base + offset, length); - - buf->len += length - (from - offset) - truncated; - xdr_set_page(xdr, offset + length, xdr_stream_remaining(xdr)); + to = xdr_align_size(offset + length); + + /* Could the hole be behind us? */ + if (to > from) { + unsigned int buflen = buf->len - buf->head->iov_len; + shift = to - from; + xdr_buf_try_expand(buf, shift); + xdr_buf_pages_shift_right(buf, from, buflen, shift); + xdr_stream_page_set_pos(xdr, to); + } else if (to != from) + xdr_align_data(xdr, to, 0); + xdr_buf_pages_zero(buf, offset, length); + + xdr_set_page(xdr, to, xdr_stream_remaining(xdr)); return length; } EXPORT_SYMBOL_GPL(xdr_expand_hole); -- cgit v1.2.3-58-ga151 From 6707fbd7d3be72da4ebde7f56b46814befd2db19 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 6 Dec 2020 10:15:04 -0500 Subject: SUNRPC: Cleanup xdr_shrink_bufhead() Clean up xdr_shrink_bufhead() to use the new helpers instead of doing its own thing. 
Signed-off-by: Trond Myklebust
---
 net/sunrpc/xdr.c | 164 +++++++++++++++++++++++++++++--------------------------
 1 file changed, 87 insertions(+), 77 deletions(-)

(limited to 'net')

diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index e0906ed24374..19eaa38f7d16 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -550,6 +550,53 @@ static void xdr_buf_pages_copy_right(const struct xdr_buf *buf,
 			   buf->page_base + base, pglen);
 }
 
+static void xdr_buf_head_copy_right(const struct xdr_buf *buf,
+				    unsigned int base, unsigned int len,
+				    unsigned int shift)
+{
+	const struct kvec *head = buf->head;
+	const struct kvec *tail = buf->tail;
+	unsigned int to = base + shift;
+	unsigned int pglen = 0, pgto = 0;
+	unsigned int talen = 0, tato = 0;
+
+	if (base >= head->iov_len)
+		return;
+	if (len > head->iov_len - base)
+		len = head->iov_len - base;
+	if (to >= buf->page_len + head->iov_len) {
+		tato = to - buf->page_len - head->iov_len;
+		talen = len;
+	} else if (to >= head->iov_len) {
+		pgto = to - head->iov_len;
+		pglen = len;
+		if (pgto + pglen > buf->page_len) {
+			talen = pgto + pglen - buf->page_len;
+			pglen -= talen;
+		}
+	} else {
+		pglen = len - head->iov_len + to;
+		if (pglen > buf->page_len) {
+			talen = pglen - buf->page_len;
+			pglen = buf->page_len;
+		}
+	}
+
+	len -= talen;
+	base += len;
+	if (talen + tato > tail->iov_len)
+		talen = tail->iov_len > tato ? tail->iov_len - tato : 0;
+	memcpy(tail->iov_base + tato, head->iov_base + base, talen);
+
+	len -= pglen;
+	base -= pglen;
+	_copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base,
+		       pglen);
+
+	base -= len;
+	memmove(head->iov_base + to, head->iov_base + base, len);
+}
+
 static void xdr_buf_tail_shift_right(const struct xdr_buf *buf,
 				     unsigned int base, unsigned int len,
 				     unsigned int shift)
@@ -577,6 +624,25 @@ static void xdr_buf_pages_shift_right(const struct xdr_buf *buf,
 	xdr_buf_pages_copy_right(buf, base, len, shift);
 }
 
+static void xdr_buf_head_shift_right(const struct xdr_buf *buf,
+				     unsigned int base, unsigned int len,
+				     unsigned int shift)
+{
+	const struct kvec *head = buf->head;
+
+	if (!shift)
+		return;
+	if (base >= head->iov_len) {
+		xdr_buf_pages_shift_right(buf, base - head->iov_len, len,
+					  shift);
+		return;
+	}
+	if (base + len > head->iov_len)
+		xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len,
+					  shift);
+	xdr_buf_head_copy_right(buf, base, len, shift);
+}
+
 static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base,
 				   unsigned int len, unsigned int shift)
 {
@@ -683,86 +749,31 @@ static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
 /**
  * xdr_shrink_bufhead
  * @buf: xdr_buf
- * @len: bytes to remove from buf->head[0]
+ * @len: new length of buf->head[0]
  *
- * Shrinks XDR buffer's header kvec buf->head[0] by
+ * Shrinks XDR buffer's header kvec buf->head[0], setting it to
  * 'len' bytes. The extra data is not lost, but is instead
  * moved into the inlined pages and/or the tail.
*/ -static unsigned int -xdr_shrink_bufhead(struct xdr_buf *buf, size_t len) +static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len) { - struct kvec *head, *tail; - size_t copy, offs; - unsigned int pglen = buf->page_len; - unsigned int result; - - result = 0; - tail = buf->tail; - head = buf->head; + struct kvec *head = buf->head; + unsigned int shift, buflen = max(buf->len, len); WARN_ON_ONCE(len > head->iov_len); - if (len > head->iov_len) - len = head->iov_len; - - /* Shift the tail first */ - if (tail->iov_len != 0) { - if (tail->iov_len > len) { - copy = tail->iov_len - len; - memmove((char *)tail->iov_base + len, - tail->iov_base, copy); - result += copy; - } - /* Copy from the inlined pages into the tail */ - copy = len; - if (copy > pglen) - copy = pglen; - offs = len - copy; - if (offs >= tail->iov_len) - copy = 0; - else if (copy > tail->iov_len - offs) - copy = tail->iov_len - offs; - if (copy != 0) { - _copy_from_pages((char *)tail->iov_base + offs, - buf->pages, - buf->page_base + pglen + offs - len, - copy); - result += copy; - } - /* Do we also need to copy data from the head into the tail ? */ - if (len > pglen) { - offs = copy = len - pglen; - if (copy > tail->iov_len) - copy = tail->iov_len; - memcpy(tail->iov_base, - (char *)head->iov_base + - head->iov_len - offs, - copy); - result += copy; - } + if (head->iov_len > buflen) { + buf->buflen -= head->iov_len - buflen; + head->iov_len = buflen; } - /* Now handle pages */ - if (pglen != 0) { - if (pglen > len) - _shift_data_right_pages(buf->pages, - buf->page_base + len, - buf->page_base, - pglen - len); - copy = len; - if (len > pglen) - copy = pglen; - _copy_to_pages(buf->pages, buf->page_base, - (char *)head->iov_base + head->iov_len - len, - copy); - result += copy; - } - head->iov_len -= len; - buf->buflen -= len; - /* Have we truncated the message? */ - if (buf->len > buf->buflen) - buf->len = buf->buflen; - - return result; + if (len >= head->iov_len) + return 0; + shift = head->iov_len - len; + xdr_buf_try_expand(buf, shift); + xdr_buf_head_shift_right(buf, len, buflen - len, shift); + head->iov_len = len; + buf->buflen -= shift; + buf->len -= shift; + return shift; } /** @@ -798,7 +809,7 @@ static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len) void xdr_shift_buf(struct xdr_buf *buf, size_t len) { - xdr_shrink_bufhead(buf, len); + xdr_shrink_bufhead(buf, buf->head->iov_len - len); } EXPORT_SYMBOL_GPL(xdr_shift_buf); @@ -1371,13 +1382,12 @@ static void xdr_realign_pages(struct xdr_stream *xdr) struct xdr_buf *buf = xdr->buf; struct kvec *iov = buf->head; unsigned int cur = xdr_stream_pos(xdr); - unsigned int copied, offset; + unsigned int copied; /* Realign pages to current pointer position */ if (iov->iov_len > cur) { - offset = iov->iov_len - cur; - copied = xdr_shrink_bufhead(buf, offset); - trace_rpc_xdr_alignment(xdr, offset, copied); + copied = xdr_shrink_bufhead(buf, cur); + trace_rpc_xdr_alignment(xdr, cur, copied); xdr->nwords = XDR_QUADLEN(buf->len - cur); } } -- cgit v1.2.3-58-ga151 From e43ac22b83921928479da0bad25aaee3d95c2b1a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 6 Dec 2020 12:41:41 -0500 Subject: SUNRPC: _copy_to/from_pages() now check for zero length Clean up callers of _copy_to/from_pages() that still check for a zero length. 
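To make the intent concrete (hypothetical snippet, mirroring the
__read_bytes_from_xdr_buf() hunk below): since the copy helpers themselves now
bail out on a zero length, the caller-side guard becomes redundant and can be
dropped.

	this_len = min_t(unsigned int, len, subbuf->page_len);

	/* before: the caller had to skip zero-length copies itself */
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);

	/* after: _copy_from_pages() returns early when this_len == 0 */
	_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);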
Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 19eaa38f7d16..01918e60b67b 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1665,8 +1665,7 @@ static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigne len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->page_len); - if (this_len) - _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); + _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); @@ -1696,8 +1695,7 @@ static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->page_len); - if (this_len) - _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len); + _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); -- cgit v1.2.3-58-ga151 From 5a5f1c2c2cbb6ddef637abb7c7e7cab20b9cc933 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 8 Dec 2020 15:02:14 -0500 Subject: SUNRPC: Clean up open coded setting of the xdr_stream 'nwords' field Move the setting of the xdr_stream 'nwords' field into the helpers that reset the xdr_stream cursor. Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 01918e60b67b..f0444bf5617c 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1183,6 +1183,15 @@ static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, return len - base; } +static unsigned int xdr_set_tail_base(struct xdr_stream *xdr, + unsigned int base, unsigned int len) +{ + struct xdr_buf *buf = xdr->buf; + + xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len); + return xdr_set_iov(xdr, buf->tail, base, len); +} + static unsigned int xdr_set_page_base(struct xdr_stream *xdr, unsigned int base, unsigned int len) { @@ -1201,6 +1210,7 @@ static unsigned int xdr_set_page_base(struct xdr_stream *xdr, if (len > maxlen) len = maxlen; + xdr_stream_page_set_pos(xdr, base); base += xdr->buf->page_base; pgnr = base >> PAGE_SHIFT; @@ -1223,7 +1233,7 @@ static void xdr_set_page(struct xdr_stream *xdr, unsigned int base, { if (xdr_set_page_base(xdr, base, len) == 0) { base -= xdr->buf->page_len; - xdr_set_iov(xdr, xdr->buf->tail, base, len); + xdr_set_tail_base(xdr, base, len); } } @@ -1236,7 +1246,7 @@ static void xdr_set_next_page(struct xdr_stream *xdr) if (newbase < xdr->buf->page_len) xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr)); else - xdr_set_iov(xdr, xdr->buf->tail, 0, xdr_stream_remaining(xdr)); + xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr)); } static bool xdr_set_next_buffer(struct xdr_stream *xdr) @@ -1388,7 +1398,7 @@ static void xdr_realign_pages(struct xdr_stream *xdr) if (iov->iov_len > cur) { copied = xdr_shrink_bufhead(buf, cur); trace_rpc_xdr_alignment(xdr, cur, copied); - xdr->nwords = XDR_QUADLEN(buf->len - cur); + xdr_set_page(xdr, 0, buf->page_len); } } @@ -1396,7 +1406,6 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) { struct xdr_buf *buf = xdr->buf; unsigned int nwords = XDR_QUADLEN(len); - unsigned int cur = 
xdr_stream_pos(xdr); unsigned int copied; if (xdr->nwords == 0) @@ -1413,7 +1422,6 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) /* Truncate page data and move it into the tail */ copied = xdr_shrink_pagelen(buf, len); trace_rpc_xdr_alignment(xdr, len, copied); - xdr->nwords = XDR_QUADLEN(buf->len - cur); } return len; } @@ -1439,12 +1447,10 @@ unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len) if (pglen == 0) return 0; - xdr->nwords -= nwords; base = (nwords << 2) - pglen; end = xdr_stream_remaining(xdr) - pglen; - if (xdr_set_iov(xdr, xdr->buf->tail, base, end) == 0) - xdr->nwords = 0; + xdr_set_tail_base(xdr, base, end); return len <= pglen ? len : pglen; } EXPORT_SYMBOL_GPL(xdr_read_pages); @@ -1478,15 +1484,13 @@ unsigned int xdr_align_data(struct xdr_stream *xdr, unsigned int offset, /* Move page data to the left */ shift = from - offset; xdr_buf_pages_shift_left(buf, from, len, shift); - xdr->buf->len -= shift; - xdr->nwords -= XDR_QUADLEN(shift); bytes = xdr_stream_remaining(xdr); if (length > bytes) length = bytes; bytes -= length; - xdr->nwords -= XDR_QUADLEN(length); + xdr->buf->len -= shift; xdr_set_page(xdr, offset + length, bytes); return length; } @@ -1508,12 +1512,11 @@ unsigned int xdr_expand_hole(struct xdr_stream *xdr, unsigned int offset, shift = to - from; xdr_buf_try_expand(buf, shift); xdr_buf_pages_shift_right(buf, from, buflen, shift); - xdr_stream_page_set_pos(xdr, to); + xdr_set_page(xdr, to, xdr_stream_remaining(xdr)); } else if (to != from) xdr_align_data(xdr, to, 0); xdr_buf_pages_zero(buf, offset, length); - xdr_set_page(xdr, to, xdr_stream_remaining(xdr)); return length; } EXPORT_SYMBOL_GPL(xdr_expand_hole); -- cgit v1.2.3-58-ga151 From f8d0e60f1056687826abc1eded98f0ea067dfc4c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 8 Dec 2020 22:56:18 -0500 Subject: SUNRPC: Cleanup - constify a number of xdr_buf helpers There are a number of xdr helpers for struct xdr_buf that do not change the structure itself. Mark those as taking const pointers for documentation purposes. 
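As a small sketch of what the constification buys callers (hypothetical
function, not part of this patch), code that only inspects a received buffer
can hold a const pointer and still use the decode helpers without casting away
the qualifier:

	static int example_peek_word(const struct xdr_buf *buf, u32 *value)
	{
		/* xdr_decode_word() now accepts a const struct xdr_buf * */
		return xdr_decode_word(buf, 0, value);
	}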
Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xdr.h | 22 +++++++++---------- net/sunrpc/xdr.c | 53 +++++++++++++++++++++------------------------- 2 files changed, 35 insertions(+), 40 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 178f499e2283..68d49fdc4ee9 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -128,8 +128,8 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *); void xdr_inline_pages(struct xdr_buf *, unsigned int, struct page **, unsigned int, unsigned int); -void xdr_terminate_string(struct xdr_buf *, const u32); -size_t xdr_buf_pagecount(struct xdr_buf *buf); +void xdr_terminate_string(const struct xdr_buf *, const u32); +size_t xdr_buf_pagecount(const struct xdr_buf *buf); int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp); void xdr_free_bvec(struct xdr_buf *buf); @@ -182,14 +182,14 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) * XDR buffer helper functions */ extern void xdr_shift_buf(struct xdr_buf *, size_t); -extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); -extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); +extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *); +extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); extern void xdr_buf_trim(struct xdr_buf *, unsigned int); -extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); -extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); +extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int); +extern int write_bytes_to_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int); -extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); -extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); +extern int xdr_encode_word(const struct xdr_buf *, unsigned int, u32); +extern int xdr_decode_word(const struct xdr_buf *, unsigned int, u32 *); struct xdr_array2_desc; typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem); @@ -200,9 +200,9 @@ struct xdr_array2_desc { xdr_xcode_elem_t xcode; }; -extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, +extern int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc); -extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, +extern int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc); extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len); @@ -251,7 +251,7 @@ extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buf extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); -extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); +extern int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, unsigned int length); extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length); diff --git a/net/sunrpc/xdr.c 
b/net/sunrpc/xdr.c index f0444bf5617c..2e91fbd70f11 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -123,8 +123,7 @@ EXPORT_SYMBOL_GPL(xdr_decode_string_inplace); * @len: length of string, in bytes * */ -void -xdr_terminate_string(struct xdr_buf *buf, const u32 len) +void xdr_terminate_string(const struct xdr_buf *buf, const u32 len) { char *kaddr; @@ -134,8 +133,7 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len) } EXPORT_SYMBOL_GPL(xdr_terminate_string); -size_t -xdr_buf_pagecount(struct xdr_buf *buf) +size_t xdr_buf_pagecount(const struct xdr_buf *buf) { if (!buf->page_len) return 0; @@ -1545,8 +1543,7 @@ EXPORT_SYMBOL_GPL(xdr_enter_page); static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; -void -xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf) +void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf) { buf->head[0] = *iov; buf->tail[0] = empty_iov; @@ -1569,9 +1566,8 @@ EXPORT_SYMBOL_GPL(xdr_buf_from_iov); * * Returns -1 if base of length are out of bounds. */ -int -xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, - unsigned int base, unsigned int len) +int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf, + unsigned int base, unsigned int len) { subbuf->buflen = subbuf->len = len; if (base < buf->head[0].iov_len) { @@ -1659,7 +1655,8 @@ fix_len: } EXPORT_SYMBOL_GPL(xdr_buf_trim); -static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) +static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf, + void *obj, unsigned int len) { unsigned int this_len; @@ -1676,7 +1673,8 @@ static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigne } /* obj is assumed to point to allocated memory of size at least len: */ -int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) +int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base, + void *obj, unsigned int len) { struct xdr_buf subbuf; int status; @@ -1689,7 +1687,8 @@ int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, u } EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf); -static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) +static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf, + void *obj, unsigned int len) { unsigned int this_len; @@ -1706,7 +1705,8 @@ static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned } /* obj is assumed to point to allocated memory of size at least len: */ -int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) +int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base, + void *obj, unsigned int len) { struct xdr_buf subbuf; int status; @@ -1719,8 +1719,7 @@ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, un } EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf); -int -xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) +int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj) { __be32 raw; int status; @@ -1733,8 +1732,7 @@ xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) } EXPORT_SYMBOL_GPL(xdr_decode_word); -int -xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) +int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj) { __be32 raw = cpu_to_be32(obj); @@ -1743,9 +1741,8 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) 
 EXPORT_SYMBOL_GPL(xdr_encode_word);
 
 /* Returns 0 on success, or else a negative error code. */
-static int
-xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
-		 struct xdr_array2_desc *desc, int encode)
+static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base,
+			    struct xdr_array2_desc *desc, int encode)
 {
 	char *elem = NULL, *c;
 	unsigned int copied = 0, todo, avail_here;
@@ -1937,9 +1934,8 @@ out:
 	return err;
 }
 
-int
-xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
-		  struct xdr_array2_desc *desc)
+int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
+		      struct xdr_array2_desc *desc)
 {
 	if (base >= buf->len)
 		return -EINVAL;
@@ -1948,9 +1944,8 @@ xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
 }
 EXPORT_SYMBOL_GPL(xdr_decode_array2);
 
-int
-xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
-		  struct xdr_array2_desc *desc)
+int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
+		      struct xdr_array2_desc *desc)
 {
 	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
 	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
@@ -1960,9 +1955,9 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
 }
 EXPORT_SYMBOL_GPL(xdr_encode_array2);
 
-int
-xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
-		int (*actor)(struct scatterlist *, void *), void *data)
+int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset,
+		    unsigned int len,
+		    int (*actor)(struct scatterlist *, void *), void *data)
 {
 	int i, ret = 0;
 	unsigned int page_len, thislen, page_offset;
-- cgit v1.2.3-58-ga151


From 5802f7c2a6b876b2810e3e9f26d719961f12e251 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Thu, 10 Dec 2020 08:55:35 -0500
Subject: SUNRPC: When expanding the buffer, we may need to grow the sparse pages

If we're shifting the page data to the right, and this happens to be a
sparse page array, then we may need to allocate new pages in order to
receive the data.
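For context (an illustrative sketch only, not taken from this patch;
head_mem, head_len and the page count are made up), a sparse receive buffer is
one whose page slots start out NULL and only get backing pages once data
actually lands there:

	struct page *pages[4] = { NULL };	/* no backing pages yet */
	struct xdr_buf buf;

	xdr_buf_init(&buf, head_mem, head_len);
	xdr_inline_pages(&buf, head_len, pages, 0, 4 * PAGE_SIZE);
	buf.flags |= XDRBUF_SPARSE_PAGES;

Shifting page data to the right past the currently backed area can land in one
of those NULL slots, which is why xdr_buf_try_expand() below now calls
xdr_buf_pages_fill_sparse() to allocate the missing pages first.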
Reported-by: "Mkrtchyan, Tigran"
Signed-off-by: Trond Myklebust
---
 net/sunrpc/xdr.c | 35 +++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

(limited to 'net')

diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2e91fbd70f11..60d4442c5273 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -478,16 +478,47 @@ static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase,
 	} while ((len -= zero) != 0);
 }
 
+static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf,
+					      unsigned int buflen, gfp_t gfp)
+{
+	unsigned int i, npages, pagelen;
+
+	if (!(buf->flags & XDRBUF_SPARSE_PAGES))
+		return buflen;
+	if (buflen <= buf->head->iov_len)
+		return buflen;
+	pagelen = buflen - buf->head->iov_len;
+	if (pagelen > buf->page_len)
+		pagelen = buf->page_len;
+	npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	for (i = 0; i < npages; i++) {
+		if (buf->pages[i])
+			continue;
+		buf->pages[i] = alloc_page(gfp);
+		if (likely(buf->pages[i]))
+			continue;
+		buflen -= pagelen;
+		pagelen = i << PAGE_SHIFT;
+		if (pagelen > buf->page_base)
+			buflen += pagelen - buf->page_base;
+		break;
+	}
+	return buflen;
+}
+
 static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len)
 {
 	struct kvec *head = buf->head;
 	struct kvec *tail = buf->tail;
 	unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;
-	unsigned int free_space;
+	unsigned int free_space, newlen;
 
 	if (sum > buf->len) {
 		free_space = min_t(unsigned int, sum - buf->len, len);
-		buf->len += free_space;
+		newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space,
+						   GFP_KERNEL);
+		free_space = newlen - buf->len;
+		buf->len = newlen;
 		len -= free_space;
 		if (!len)
 			return;
-- cgit v1.2.3-58-ga151