From 3821e232eb3b7591f07ce4c389313ab55ebee372 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:15 -0500 Subject: xprtrdma: Replace dprintk call sites in ERR_CHUNK path Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 82 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index bf1065772228..d5e66428e27e 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1128,6 +1128,88 @@ DEFINE_REPLY_EVENT(xprtrdma_reply_rqst); DEFINE_REPLY_EVENT(xprtrdma_reply_short); DEFINE_REPLY_EVENT(xprtrdma_reply_hdr); +TRACE_EVENT(xprtrdma_err_vers, + TP_PROTO( + const struct rpc_rqst *rqst, + __be32 *min, + __be32 *max + ), + + TP_ARGS(rqst, min, max), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(u32, min) + __field(u32, max) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->min = be32_to_cpup(min); + __entry->max = be32_to_cpup(max); + ), + + TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->min, __entry->max + ) +); + +TRACE_EVENT(xprtrdma_err_chunk, + TP_PROTO( + const struct rpc_rqst *rqst + ), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + ), + + TP_printk("task:%u@%u xid=0x%08x", + __entry->task_id, __entry->client_id, __entry->xid + ) +); + +TRACE_EVENT(xprtrdma_err_unrecognized, + TP_PROTO( + const struct rpc_rqst *rqst, + __be32 *procedure + ), + + TP_ARGS(rqst, procedure), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(u32, procedure) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->procedure = be32_to_cpup(procedure); + ), + + TP_printk("task:%u@%u xid=0x%08x procedure=%u", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->procedure + ) +); + TRACE_EVENT(xprtrdma_fixup, TP_PROTO( const struct rpc_rqst *rqst, -- cgit v1.2.3 From af5865d2783958294179da56a4d073a9630b3068 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:21 -0500 Subject: xprtrdma: Introduce Receive completion IDs Set up a completion ID in each rpcrdma_rep. The ID is used to match an incoming Receive completion to a transport and to a previous ib_post_recv(). 
Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 46 +++++++---------------------------------- net/sunrpc/xprtrdma/verbs.c | 6 +++++- net/sunrpc/xprtrdma/xprt_rdma.h | 5 +++++ 3 files changed, 18 insertions(+), 39 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index d5e66428e27e..1c91c8e721e7 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -771,15 +771,17 @@ TRACE_EVENT(xprtrdma_post_recv, TP_ARGS(rep), TP_STRUCT__entry( - __field(const void *, rep) + __field(u32, cq_id) + __field(int, completion_id) ), TP_fast_assign( - __entry->rep = rep; + __entry->cq_id = rep->rr_cid.ci_queue_id; + __entry->completion_id = rep->rr_cid.ci_completion_id; ), - TP_printk("rep=%p", - __entry->rep + TP_printk("cq.id=%d cid=%d", + __entry->cq_id, __entry->completion_id ) ); @@ -845,6 +847,8 @@ TRACE_EVENT(xprtrdma_post_linv, ** Completion events **/ +DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); + TRACE_EVENT(xprtrdma_wc_send, TP_PROTO( const struct rpcrdma_sendctx *sc, @@ -876,40 +880,6 @@ TRACE_EVENT(xprtrdma_wc_send, ) ); -TRACE_EVENT(xprtrdma_wc_receive, - TP_PROTO( - const struct ib_wc *wc - ), - - TP_ARGS(wc), - - TP_STRUCT__entry( - __field(const void *, rep) - __field(u32, byte_len) - __field(unsigned int, status) - __field(u32, vendor_err) - ), - - TP_fast_assign( - __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep, - rr_cqe); - __entry->status = wc->status; - if (wc->status) { - __entry->byte_len = 0; - __entry->vendor_err = wc->vendor_err; - } else { - __entry->byte_len = wc->byte_len; - __entry->vendor_err = 0; - } - ), - - TP_printk("rep=%p %u bytes: %s (%u/0x%x)", - __entry->rep, __entry->byte_len, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index ad6e2e4994ce..2c8d2801ec4f 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -186,7 +186,7 @@ static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_xprt *r_xprt = cq->cq_context; /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_receive(wc); + trace_xprtrdma_wc_receive(wc, &rep->rr_cid); --r_xprt->rx_ep->re_receive_count; if (wc->status != IB_WC_SUCCESS) goto out_flushed; @@ -972,6 +972,9 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) goto out_free_regbuf; + rep->rr_cid.ci_completion_id = + atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); + xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), rdmab_length(rep->rr_rdmabuf)); rep->rr_cqe.done = rpcrdma_wc_receive; @@ -1411,6 +1414,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) if (!rep) break; + rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; trace_xprtrdma_post_recv(rep); rep->rr_recv_wr.next = wr; wr = &rep->rr_recv_wr; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 43974ef39a50..b94940bc67aa 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -53,6 +53,7 @@ #include /* RDMA verbs api */ #include /* rpc_xprt */ +#include /* completion IDs */ #include /* RPC/RDMA protocol */ #include /* xprt parameters */ @@ 
-93,6 +94,8 @@ struct rpcrdma_ep { unsigned int re_max_requests; /* depends on device */ unsigned int re_inline_send; /* negotiated */ unsigned int re_inline_recv; /* negotiated */ + + atomic_t re_completion_ids; }; /* Pre-allocate extra Work Requests for handling backward receives @@ -180,6 +183,8 @@ enum { struct rpcrdma_rep { struct ib_cqe rr_cqe; + struct rpc_rdma_cid rr_cid; + __be32 rr_xid; __be32 rr_vers; __be32 rr_proc; -- cgit v1.2.3 From b2e7467f26d7813d98cbaad5e62b54960f2c071b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:26 -0500 Subject: xprtrdma: Introduce Send completion IDs Set up a completion ID in each rpcrdma_req. The ID is used to match an incoming Send completion to a transport and to a previous ib_post_send(). Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 47 ++++++++--------------------------------- net/sunrpc/xprtrdma/verbs.c | 5 ++++- net/sunrpc/xprtrdma/xprt_rdma.h | 1 + 3 files changed, 14 insertions(+), 39 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 1c91c8e721e7..ab239f4f924e 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -735,8 +735,8 @@ TRACE_EVENT(xprtrdma_post_send, TP_ARGS(req), TP_STRUCT__entry( - __field(const void *, req) - __field(const void *, sc) + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, num_sge) @@ -745,20 +745,21 @@ TRACE_EVENT(xprtrdma_post_send, TP_fast_assign( const struct rpc_rqst *rqst = &req->rl_slot; + const struct rpcrdma_sendctx *sc = req->rl_sendctx; + __entry->cq_id = sc->sc_cid.ci_queue_id; + __entry->completion_id = sc->sc_cid.ci_completion_id; __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client ? rqst->rq_task->tk_client->cl_clid : -1; - __entry->req = req; - __entry->sc = req->rl_sendctx; __entry->num_sge = req->rl_wr.num_sge; __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; ), - TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s", + TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s", __entry->task_id, __entry->client_id, - __entry->req, __entry->sc, __entry->num_sge, - (__entry->num_sge == 1 ? "" : "s"), + __entry->cq_id, __entry->completion_id, + __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"), (__entry->signaled ? "signaled" : "") ) ); @@ -848,37 +849,7 @@ TRACE_EVENT(xprtrdma_post_linv, **/ DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); - -TRACE_EVENT(xprtrdma_wc_send, - TP_PROTO( - const struct rpcrdma_sendctx *sc, - const struct ib_wc *wc - ), - - TP_ARGS(sc, wc), - - TP_STRUCT__entry( - __field(const void *, req) - __field(const void *, sc) - __field(unsigned int, unmap_count) - __field(unsigned int, status) - __field(unsigned int, vendor_err) - ), - - TP_fast_assign( - __entry->req = sc->sc_req; - __entry->sc = sc; - __entry->unmap_count = sc->sc_unmap_count; - __entry->status = wc->status; - __entry->vendor_err = __entry->status ? 
wc->vendor_err : 0; - ), - - TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)", - __entry->req, __entry->sc, __entry->unmap_count, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 2c8d2801ec4f..63837b5d14e5 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -167,7 +167,7 @@ static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_xprt *r_xprt = cq->cq_context; /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_send(sc, wc); + trace_xprtrdma_wc_send(wc, &sc->sc_cid); rpcrdma_sendctx_put_locked(r_xprt, sc); rpcrdma_flush_disconnect(r_xprt, wc); } @@ -643,6 +643,9 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep) return NULL; sc->sc_cqe.done = rpcrdma_wc_send; + sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id; + sc->sc_cid.ci_completion_id = + atomic_inc_return(&ep->re_completion_ids); return sc; } diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index b94940bc67aa..4eb8e32b9f4a 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -216,6 +216,7 @@ enum { struct rpcrdma_req; struct rpcrdma_sendctx { struct ib_cqe sc_cqe; + struct rpc_rdma_cid sc_cid; struct rpcrdma_req *sc_req; unsigned int sc_unmap_count; struct ib_sge sc_sges[]; -- cgit v1.2.3 From 5ecef9c84366955f61e379140c1065d576c66ada Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:31 -0500 Subject: xprtrdma: Introduce FRWR completion IDs Set up a completion ID in each rpcrdma_frwr. The ID is used to match an incoming completion to a transport (CQ) and other MR-related activity. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 44 ++++------------------------------------- net/sunrpc/xprtrdma/frwr_ops.c | 29 ++++++++++++++++++++------- net/sunrpc/xprtrdma/xprt_rdma.h | 1 + 3 files changed, 27 insertions(+), 47 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index ab239f4f924e..9e30f8aa3562 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -261,41 +261,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event, ), \ TP_ARGS(task, mr, nsegs)) -DECLARE_EVENT_CLASS(xprtrdma_frwr_done, - TP_PROTO( - const struct ib_wc *wc, - const struct rpcrdma_frwr *frwr - ), - - TP_ARGS(wc, frwr), - - TP_STRUCT__entry( - __field(u32, mr_id) - __field(unsigned int, status) - __field(unsigned int, vendor_err) - ), - - TP_fast_assign( - __entry->mr_id = frwr->fr_mr->res.id; - __entry->status = wc->status; - __entry->vendor_err = __entry->status ? 
wc->vendor_err : 0; - ), - - TP_printk( - "mr.id=%u: %s (%u/0x%x)", - __entry->mr_id, rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - -#define DEFINE_FRWR_DONE_EVENT(name) \ - DEFINE_EVENT(xprtrdma_frwr_done, name, \ - TP_PROTO( \ - const struct ib_wc *wc, \ - const struct rpcrdma_frwr *frwr \ - ), \ - TP_ARGS(wc, frwr)) - TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); TRACE_DEFINE_ENUM(DMA_TO_DEVICE); TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); @@ -850,11 +815,10 @@ TRACE_EVENT(xprtrdma_post_linv, DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); - -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done); TRACE_EVENT(xprtrdma_frwr_alloc, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 44888f5badef..2cc6862a52dc 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -363,12 +363,21 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) container_of(cqe, struct rpcrdma_frwr, fr_cqe); /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_fastreg(wc, frwr); + trace_xprtrdma_wc_fastreg(wc, &frwr->fr_cid); /* The MR will get recycled when the associated req is retransmitted */ rpcrdma_flush_disconnect(cq->cq_context, wc); } +static void frwr_cid_init(struct rpcrdma_ep *ep, + struct rpcrdma_frwr *frwr) +{ + struct rpc_rdma_cid *cid = &frwr->fr_cid; + + cid->ci_queue_id = ep->re_attr.send_cq->res.id; + cid->ci_completion_id = frwr->fr_mr->res.id; +} + /** * frwr_send - post Send WRs containing the RPC Call message * @r_xprt: controlling transport instance @@ -385,6 +394,7 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) */ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) { + struct rpcrdma_ep *ep = r_xprt->rx_ep; struct ib_send_wr *post_wr; struct rpcrdma_mr *mr; @@ -395,6 +405,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) frwr = &mr->frwr; frwr->fr_cqe.done = frwr_wc_fastreg; + frwr_cid_init(ep, frwr); frwr->fr_regwr.wr.next = post_wr; frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe; frwr->fr_regwr.wr.num_sge = 0; @@ -404,7 +415,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) post_wr = &frwr->fr_regwr.wr; } - return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL); + return ib_post_send(ep->re_id->qp, post_wr, NULL); } /** @@ -448,7 +459,7 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr); /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_li(wc, frwr); + trace_xprtrdma_wc_li(wc, &frwr->fr_cid); __frwr_release_mr(wc, mr); rpcrdma_flush_disconnect(cq->cq_context, wc); @@ -469,7 +480,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr); /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_li_wake(wc, frwr); + trace_xprtrdma_wc_li_wake(wc, &frwr->fr_cid); __frwr_release_mr(wc, mr); complete(&frwr->fr_linv_done); @@ -490,6 +501,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) 
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) { struct ib_send_wr *first, **prev, *last; + struct rpcrdma_ep *ep = r_xprt->rx_ep; const struct ib_send_wr *bad_wr; struct rpcrdma_frwr *frwr; struct rpcrdma_mr *mr; @@ -509,6 +521,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) frwr = &mr->frwr; frwr->fr_cqe.done = frwr_wc_localinv; + frwr_cid_init(ep, frwr); last = &frwr->fr_invwr; last->next = NULL; last->wr_cqe = &frwr->fr_cqe; @@ -534,7 +547,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) * unless re_id->qp is a valid pointer. */ bad_wr = NULL; - rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr); + rc = ib_post_send(ep->re_id->qp, first, &bad_wr); /* The final LOCAL_INV WR in the chain is supposed to * do the wake. If it was never posted, the wake will @@ -574,7 +587,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc) struct rpcrdma_rep *rep = mr->mr_req->rl_reply; /* WARNING: Only wr_cqe and status are reliable at this point */ - trace_xprtrdma_wc_li_done(wc, frwr); + trace_xprtrdma_wc_li_done(wc, &frwr->fr_cid); __frwr_release_mr(wc, mr); /* Ensure @rep is generated before __frwr_release_mr */ @@ -597,6 +610,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc) void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) { struct ib_send_wr *first, *last, **prev; + struct rpcrdma_ep *ep = r_xprt->rx_ep; const struct ib_send_wr *bad_wr; struct rpcrdma_frwr *frwr; struct rpcrdma_mr *mr; @@ -614,6 +628,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) frwr = &mr->frwr; frwr->fr_cqe.done = frwr_wc_localinv; + frwr_cid_init(ep, frwr); last = &frwr->fr_invwr; last->next = NULL; last->wr_cqe = &frwr->fr_cqe; @@ -639,7 +654,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) * unless re_id->qp is a valid pointer. 
*/ bad_wr = NULL; - rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr); + rc = ib_post_send(ep->re_id->qp, first, &bad_wr); if (!rc) return; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 4eb8e32b9f4a..cef9d0f2e2c8 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -231,6 +231,7 @@ struct rpcrdma_sendctx { struct rpcrdma_frwr { struct ib_mr *fr_mr; struct ib_cqe fr_cqe; + struct rpc_rdma_cid fr_cid; struct completion fr_linv_done; union { struct ib_reg_wr fr_regwr; -- cgit v1.2.3 From 36a55edfc3d5b1c2735c088bcb06967de103f299 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:37 -0500 Subject: xprtrdma: Clean up trace_xprtrdma_post_linv - Replace the display of kernel memory addresses - Add "_err" to the end of its name to indicate that it's a tracepoint that fires only when there's an error Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 16 +++++++++------- net/sunrpc/xprtrdma/frwr_ops.c | 4 ++-- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 9e30f8aa3562..b0750c0d2753 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -784,7 +784,7 @@ TRACE_EVENT(xprtrdma_post_recvs, ) ); -TRACE_EVENT(xprtrdma_post_linv, +TRACE_EVENT(xprtrdma_post_linv_err, TP_PROTO( const struct rpcrdma_req *req, int status @@ -793,19 +793,21 @@ TRACE_EVENT(xprtrdma_post_linv, TP_ARGS(req, status), TP_STRUCT__entry( - __field(const void *, req) + __field(unsigned int, task_id) + __field(unsigned int, client_id) __field(int, status) - __field(u32, xid) ), TP_fast_assign( - __entry->req = req; + const struct rpc_task *task = req->rl_slot.rq_task; + + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; __entry->status = status; - __entry->xid = be32_to_cpu(req->rl_slot.rq_xid); ), - TP_printk("req=%p xid=0x%08x status=%d", - __entry->req, __entry->xid, __entry->status + TP_printk("task:%u@%u status=%d", + __entry->task_id, __entry->client_id, __entry->status ) ); diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 2cc6862a52dc..76322b1acf3d 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -560,7 +560,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) /* Recycle MRs in the LOCAL_INV chain that did not get posted. */ - trace_xprtrdma_post_linv(req, rc); + trace_xprtrdma_post_linv_err(req, rc); while (bad_wr) { frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr); @@ -660,7 +660,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) /* Recycle MRs in the LOCAL_INV chain that did not get posted. 
*/ - trace_xprtrdma_post_linv(req, rc); + trace_xprtrdma_post_linv_err(req, rc); while (bad_wr) { frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr); mr = container_of(frwr, struct rpcrdma_mr, frwr); -- cgit v1.2.3 From 3a9568fedccc6cf26c1a87621c3bfed7b7432119 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:42 -0500 Subject: xprtrdma: Clean up reply parsing error tracepoints - Rename the tracepoints with the "_err" suffix to indicate these are rare error events - Replace display of kernel memory addresses - Tie the XID and error to a connection IP address instead Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 27 ++++++++++++++------------- net/sunrpc/xprtrdma/rpc_rdma.c | 10 +++++----- 2 files changed, 19 insertions(+), 18 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index b0750c0d2753..93d717d8139f 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -60,7 +60,7 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) -DECLARE_EVENT_CLASS(xprtrdma_reply_event, +DECLARE_EVENT_CLASS(xprtrdma_reply_class, TP_PROTO( const struct rpcrdma_rep *rep ), @@ -68,29 +68,30 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_event, TP_ARGS(rep), TP_STRUCT__entry( - __field(const void *, rep) - __field(const void *, r_xprt) __field(u32, xid) __field(u32, version) __field(u32, proc) + __string(addr, rpcrdma_addrstr(rep->rr_rxprt)) + __string(port, rpcrdma_portstr(rep->rr_rxprt)) ), TP_fast_assign( - __entry->rep = rep; - __entry->r_xprt = rep->rr_rxprt; __entry->xid = be32_to_cpu(rep->rr_xid); __entry->version = be32_to_cpu(rep->rr_vers); __entry->proc = be32_to_cpu(rep->rr_proc); + __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt)); + __assign_str(port, rpcrdma_portstr(rep->rr_rxprt)); ), - TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u", - __entry->r_xprt, __entry->xid, __entry->rep, - __entry->version, __entry->proc + TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u", + __get_str(addr), __get_str(port), + __entry->xid, __entry->version, __entry->proc ) ); #define DEFINE_REPLY_EVENT(name) \ - DEFINE_EVENT(xprtrdma_reply_event, name, \ + DEFINE_EVENT(xprtrdma_reply_class, \ + xprtrdma_reply_##name##_err, \ TP_PROTO( \ const struct rpcrdma_rep *rep \ ), \ @@ -1030,10 +1031,10 @@ TRACE_EVENT(xprtrdma_defer_cmp, ) ); -DEFINE_REPLY_EVENT(xprtrdma_reply_vers); -DEFINE_REPLY_EVENT(xprtrdma_reply_rqst); -DEFINE_REPLY_EVENT(xprtrdma_reply_short); -DEFINE_REPLY_EVENT(xprtrdma_reply_hdr); +DEFINE_REPLY_EVENT(vers); +DEFINE_REPLY_EVENT(rqst); +DEFINE_REPLY_EVENT(short); +DEFINE_REPLY_EVENT(hdr); TRACE_EVENT(xprtrdma_err_vers, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index c178f93aa40b..29f847c8f609 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (c) 2014-2017 Oracle. All rights reserved. + * Copyright (c) 2014-2020, Oracle and/or its affiliates. * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -1369,7 +1369,7 @@ out: return; out_badheader: - trace_xprtrdma_reply_hdr(rep); + trace_xprtrdma_reply_hdr_err(rep); r_xprt->rx_stats.bad_reply_count++; rqst->rq_task->tk_status = status; status = 0; @@ -1462,16 +1462,16 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) return; out_badversion: - trace_xprtrdma_reply_vers(rep); + trace_xprtrdma_reply_vers_err(rep); goto out; out_norqst: spin_unlock(&xprt->queue_lock); - trace_xprtrdma_reply_rqst(rep); + trace_xprtrdma_reply_rqst_err(rep); goto out; out_shortreply: - trace_xprtrdma_reply_short(rep); + trace_xprtrdma_reply_short_err(rep); out: rpcrdma_recv_buffer_put(rep); -- cgit v1.2.3 From 03ffd92494a53dcc4b98c909ae1f6787d1fec646 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:47 -0500 Subject: xprtrdma: Clean up tracepoints in the reply path Replace unnecessary display of kernel memory addresses. Also, there are no longer any trace_xprtrdma_defer_cmp() call sites. And remove the trace_xprtrdma_leaked_rep() tracepoint because there doesn't seem to be an overwhelming need to have a tracepoint for catching a software bug that has long since been fixed. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 66 ++---------------------------------------- net/sunrpc/xprtrdma/rpc_rdma.c | 6 ++-- 2 files changed, 5 insertions(+), 67 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 93d717d8139f..c28bf17e769b 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -974,17 +974,14 @@ TRACE_EVENT(xprtrdma_reply, TP_PROTO( const struct rpc_task *task, const struct rpcrdma_rep *rep, - const struct rpcrdma_req *req, unsigned int credits ), - TP_ARGS(task, rep, req, credits), + TP_ARGS(task, rep, credits), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, rep) - __field(const void *, req) __field(u32, xid) __field(unsigned int, credits) ), @@ -992,42 +989,13 @@ TRACE_EVENT(xprtrdma_reply, TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->rep = rep; - __entry->req = req; __entry->xid = be32_to_cpu(rep->rr_xid); __entry->credits = credits; ), - TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->credits, __entry->rep, __entry->req - ) -); - -TRACE_EVENT(xprtrdma_defer_cmp, - TP_PROTO( - const struct rpcrdma_rep *rep - ), - - TP_ARGS(rep), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(const void *, rep) - __field(u32, xid) - ), - - TP_fast_assign( - __entry->task_id = rep->rr_rqst->rq_task->tk_pid; - __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid; - __entry->rep = rep; - __entry->xid = be32_to_cpu(rep->rr_xid); - ), - - TP_printk("task:%u@%u xid=0x%08x rep=%p", + TP_printk("task:%u@%u xid=0x%08x credits=%u", __entry->task_id, __entry->client_id, __entry->xid, - __entry->rep + __entry->credits ) ); @@ -1212,34 +1180,6 @@ TRACE_EVENT(xprtrdma_cb_setup, DEFINE_CB_EVENT(xprtrdma_cb_call); DEFINE_CB_EVENT(xprtrdma_cb_reply); -TRACE_EVENT(xprtrdma_leaked_rep, - TP_PROTO( - const struct rpc_rqst *rqst, - const struct rpcrdma_rep *rep - ), - - TP_ARGS(rqst, rep), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - 
__field(u32, xid) - __field(const void *, rep) - ), - - TP_fast_assign( - __entry->task_id = rqst->rq_task->tk_pid; - __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); - __entry->rep = rep; - ), - - TP_printk("task:%u@%u xid=0x%08x rep=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->rep - ) -); - /** ** Server-side RPC/RDMA events **/ diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 29f847c8f609..8078559bdc31 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1443,14 +1443,12 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) rpcrdma_post_recvs(r_xprt, false); req = rpcr_to_rdmar(rqst); - if (req->rl_reply) { - trace_xprtrdma_leaked_rep(rqst, req->rl_reply); + if (unlikely(req->rl_reply)) rpcrdma_recv_buffer_put(req->rl_reply); - } req->rl_reply = rep; rep->rr_rqst = rqst; - trace_xprtrdma_reply(rqst->rq_task, rep, req, credits); + trace_xprtrdma_reply(rqst->rq_task, rep, credits); if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) frwr_reminv(rep, &req->rl_registered); -- cgit v1.2.3 From d11e934606ef6ce37a4bcbe89f34faf37347abb1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:53 -0500 Subject: xprtrdma: Clean up xprtrdma callback tracepoints - Replace displayed kernel memory addresses - Tie the XID and event with the peer's IP address Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 31 ++++++++++++++++--------------- net/sunrpc/xprtrdma/backchannel.c | 6 +++--- 2 files changed, 19 insertions(+), 18 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index c28bf17e769b..6bdbe1165270 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -313,38 +313,39 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, ), \ TP_ARGS(mr)) -DECLARE_EVENT_CLASS(xprtrdma_cb_event, +DECLARE_EVENT_CLASS(xprtrdma_callback_class, TP_PROTO( + const struct rpcrdma_xprt *r_xprt, const struct rpc_rqst *rqst ), - TP_ARGS(rqst), + TP_ARGS(r_xprt, rqst), TP_STRUCT__entry( - __field(const void *, rqst) - __field(const void *, rep) - __field(const void *, req) __field(u32, xid) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( - __entry->rqst = rqst; - __entry->req = rpcr_to_rdmar(rqst); - __entry->rep = rpcr_to_rdmar(rqst)->rl_reply; __entry->xid = be32_to_cpu(rqst->rq_xid); + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p", - __entry->xid, __entry->rqst, __entry->req, __entry->rep + TP_printk("peer=[%s]:%s xid=0x%08x", + __get_str(addr), __get_str(port), __entry->xid ) ); -#define DEFINE_CB_EVENT(name) \ - DEFINE_EVENT(xprtrdma_cb_event, name, \ +#define DEFINE_CALLBACK_EVENT(name) \ + DEFINE_EVENT(xprtrdma_callback_class, \ + xprtrdma_cb_##name, \ TP_PROTO( \ + const struct rpcrdma_xprt *r_xprt, \ const struct rpc_rqst *rqst \ ), \ - TP_ARGS(rqst)) + TP_ARGS(r_xprt, rqst)) /** ** Connection events @@ -1177,8 +1178,8 @@ TRACE_EVENT(xprtrdma_cb_setup, ) ); -DEFINE_CB_EVENT(xprtrdma_cb_call); -DEFINE_CB_EVENT(xprtrdma_cb_reply); +DEFINE_CALLBACK_EVENT(call); +DEFINE_CALLBACK_EVENT(reply); /** ** Server-side RPC/RDMA events diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index c92c1aac270a..946edf2db646 100644 --- 
a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2015 Oracle. All rights reserved. + * Copyright (c) 2015-2020, Oracle and/or its affiliates. * * Support for backward direction RPCs on RPC/RDMA. */ @@ -82,7 +82,7 @@ static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst) &rqst->rq_snd_buf, rpcrdma_noch_pullup)) return -EIO; - trace_xprtrdma_cb_reply(rqst); + trace_xprtrdma_cb_reply(r_xprt, rqst); return 0; } @@ -260,7 +260,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, */ req = rpcr_to_rdmar(rqst); req->rl_reply = rep; - trace_xprtrdma_cb_call(rqst); + trace_xprtrdma_cb_call(r_xprt, rqst); /* Queue rqst for ULP's callback service */ bc_serv = xprt->bc_serv; -- cgit v1.2.3 From 0307cdec7c3412d7665363ab0cd61fccf82bfb2d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:39:58 -0500 Subject: xprtrdma: Clean up trace_xprtrdma_nomrs() - Rename it following the "_err" suffix convention - Replace display of kernel memory addresses - Tie MR exhaustion to a peer IP address, similar to the createmrs tracepoint Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 19 ++++++++++--------- net/sunrpc/xprtrdma/rpc_rdma.c | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 6bdbe1165270..4fcda2a25bb8 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -545,32 +545,33 @@ TRACE_EVENT(xprtrdma_mr_get, ) ); -TRACE_EVENT(xprtrdma_nomrs, +TRACE_EVENT(xprtrdma_nomrs_err, TP_PROTO( + const struct rpcrdma_xprt *r_xprt, const struct rpcrdma_req *req ), - TP_ARGS(req), + TP_ARGS(r_xprt, req), TP_STRUCT__entry( - __field(const void *, req) __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(u32, xid) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( const struct rpc_rqst *rqst = &req->rl_slot; - __entry->req = req; __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("task:%u@%u xid=0x%08x req=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->req + TP_printk("peer=[%s]:%s task:%u@%u", + __get_str(addr), __get_str(port), + __entry->task_id, __entry->client_id ) ); diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 8078559bdc31..f27eb2322b38 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -323,7 +323,7 @@ static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt, return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr); out_getmr_err: - trace_xprtrdma_nomrs(req); + trace_xprtrdma_nomrs_err(r_xprt, req); xprt_wait_for_buffer_space(&r_xprt->rx_xprt); rpcrdma_mrs_refresh(r_xprt); return ERR_PTR(-EAGAIN); -- cgit v1.2.3 From 7703db978d4cf7c51426183b7c0d03c039757a44 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:40:03 -0500 Subject: xprtrdma: Display the task ID when reporting MR events Tie each MR event to the requesting rpc_task to make it easier to follow MR ownership and control flow. 
MR unmapping and recycling can happen in the background, after an MR's mr_req field is stale, so set up a separate tracepoint class for those events. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 96 +++++++++++++++++++++++++----------------- net/sunrpc/xprtrdma/frwr_ops.c | 1 - net/sunrpc/xprtrdma/rpc_rdma.c | 1 - 3 files changed, 58 insertions(+), 40 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 4fcda2a25bb8..166bbeef996c 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -274,7 +274,55 @@ TRACE_DEFINE_ENUM(DMA_NONE); { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ { DMA_NONE, "NONE" }) -DECLARE_EVENT_CLASS(xprtrdma_mr, +DECLARE_EVENT_CLASS(xprtrdma_mr_class, + TP_PROTO( + const struct rpcrdma_mr *mr + ), + + TP_ARGS(mr), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, mr_id) + __field(int, nents) + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + __field(u32, dir) + ), + + TP_fast_assign( + const struct rpcrdma_req *req = mr->mr_req; + const struct rpc_task *task = req->rl_slot.rq_task; + + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->nents = mr->mr_nents; + __entry->handle = mr->mr_handle; + __entry->length = mr->mr_length; + __entry->offset = mr->mr_offset; + __entry->dir = mr->mr_dir; + ), + + TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, + __entry->mr_id, __entry->nents, __entry->length, + (unsigned long long)__entry->offset, __entry->handle, + xprtrdma_show_direction(__entry->dir) + ) +); + +#define DEFINE_MR_EVENT(name) \ + DEFINE_EVENT(xprtrdma_mr_class, \ + xprtrdma_mr_##name, \ + TP_PROTO( \ + const struct rpcrdma_mr *mr \ + ), \ + TP_ARGS(mr)) + +DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class, TP_PROTO( const struct rpcrdma_mr *mr ), @@ -306,11 +354,12 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, ) ); -#define DEFINE_MR_EVENT(name) \ - DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \ - TP_PROTO( \ - const struct rpcrdma_mr *mr \ - ), \ +#define DEFINE_ANON_MR_EVENT(name) \ + DEFINE_EVENT(xprtrdma_anonymous_mr_class, \ + xprtrdma_mr_##name, \ + TP_PROTO( \ + const struct rpcrdma_mr *mr \ + ), \ TP_ARGS(mr)) DECLARE_EVENT_CLASS(xprtrdma_callback_class, @@ -516,35 +565,6 @@ TRACE_EVENT(xprtrdma_createmrs, ) ); -TRACE_EVENT(xprtrdma_mr_get, - TP_PROTO( - const struct rpcrdma_req *req - ), - - TP_ARGS(req), - - TP_STRUCT__entry( - __field(const void *, req) - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(u32, xid) - ), - - TP_fast_assign( - const struct rpc_rqst *rqst = &req->rl_slot; - - __entry->req = req; - __entry->task_id = rqst->rq_task->tk_pid; - __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); - ), - - TP_printk("task:%u@%u xid=0x%08x req=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->req - ) -); - TRACE_EVENT(xprtrdma_nomrs_err, TP_PROTO( const struct rpcrdma_xprt *r_xprt, @@ -946,9 +966,9 @@ TRACE_EVENT(xprtrdma_frwr_maperr, DEFINE_MR_EVENT(localinv); DEFINE_MR_EVENT(map); -DEFINE_MR_EVENT(unmap); -DEFINE_MR_EVENT(reminv); -DEFINE_MR_EVENT(recycle); + +DEFINE_ANON_MR_EVENT(unmap); +DEFINE_ANON_MR_EVENT(recycle); TRACE_EVENT(xprtrdma_dma_maperr, TP_PROTO( diff --git 
a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 76322b1acf3d..cb2f92409c2f 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -431,7 +431,6 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs) list_for_each_entry(mr, mrs, mr_list) if (mr->mr_handle == rep->rr_inv_rkey) { list_del_init(&mr->mr_list); - trace_xprtrdma_mr_reminv(mr); rpcrdma_mr_put(mr); break; /* only one invalidated MR per RPC */ } diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index f27eb2322b38..9ed89872ec75 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -315,7 +315,6 @@ static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt, *mr = rpcrdma_mr_get(r_xprt); if (!*mr) goto out_getmr_err; - trace_xprtrdma_mr_get(req); (*mr)->mr_req = req; } -- cgit v1.2.3 From 8e24e191d44f49f08f857f0ebc6fe91961cd1a09 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Nov 2020 14:40:08 -0500 Subject: xprtrdma: Trace unmap_sync calls ->buf_free is called nearly once per RPC. Only rarely does xprt_rdma_free() have to do anything, thus tracing every one of these calls seems unnecessary. Instead, just throw a trace event when that one occasional RPC still has MRs that need to be released. xprt_rdma_free() is further micro-optimized to reduce the amount of work done in the common case. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- include/trace/events/rpcrdma.h | 22 ++++++++++++++++++++++ net/sunrpc/xprtrdma/transport.c | 7 ++++--- 2 files changed, 26 insertions(+), 3 deletions(-) (limited to 'include/trace/events/rpcrdma.h') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 166bbeef996c..69e1caf7e882 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1167,6 +1167,28 @@ TRACE_EVENT(xprtrdma_decode_seg, ) ); +TRACE_EVENT(xprtrdma_mrs_zap, + TP_PROTO( + const struct rpc_task *task + ), + + TP_ARGS(task), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + ), + + TP_printk("task:%u@%u", + __entry->task_id, __entry->client_id + ) +); + /** ** Callback events **/ diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 8915e42240d3..bb3ed3db6c0a 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -599,11 +599,12 @@ static void xprt_rdma_free(struct rpc_task *task) { struct rpc_rqst *rqst = task->tk_rqstp; - struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); struct rpcrdma_req *req = rpcr_to_rdmar(rqst); - if (!list_empty(&req->rl_registered)) - frwr_unmap_sync(r_xprt, req); + if (unlikely(!list_empty(&req->rl_registered))) { + trace_xprtrdma_mrs_zap(task); + frwr_unmap_sync(rpcx_to_rdmax(rqst->rq_xprt), req); + } /* XXX: If the RPC is completing because of a signal and * not because a reply was received, we ought to ensure -- cgit v1.2.3
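The completion-ID patches above all key off struct rpc_rdma_cid, whose defining header is added elsewhere and is not part of these diffs; only the two fields the hunks reference are visible here. The following is a minimal sketch, assuming the shared header is <linux/sunrpc/rpc_rdma_cid.h> (the "/* completion IDs */" include comment in the xprt_rdma.h hunk suggests this) and showing only what the series itself uses:

	/* Sketch of the completion ID record used by the tracepoints above.
	 * Field names and types are taken from the hunks in this series
	 * (__field(u32, cq_id) / __field(int, completion_id)); everything
	 * else about the header is an assumption.
	 */
	struct rpc_rdma_cid {
		u32	ci_queue_id;		/* CQ resource ID, e.g. recv_cq->res.id */
		int	ci_completion_id;	/* per-endpoint counter, or the MR's res.id */
	};

Each posted Receive, each Send context, and each FRWR carries one of these records. It is filled in at setup time (see the rpcrdma_rep_create(), rpcrdma_sendctx_create(), and frwr_cid_init() hunks) and handed to the rpcrdma_completion_class tracepoints when the corresponding work completion arrives, so a flushed or failed completion can be tied back to its CQ and to the specific post operation. On a kernel built with these patches and with tracing enabled, the events can be observed like any other tracepoint, for example by writing 1 to /sys/kernel/tracing/events/rpcrdma/xprtrdma_wc_receive/enable (and xprtrdma_wc_send, xprtrdma_post_recv, and so on) and reading trace_pipe.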