author     Trond Myklebust <trond.myklebust@hammerspace.com>  2018-09-09 18:37:22 +0300
committer  Trond Myklebust <trond.myklebust@hammerspace.com>  2018-09-30 22:35:15 +0300
commit     918f3c1fe83c5baa4892b943d3f5ac7191d8fb74 (patch)
tree       bf518a11df0b2fe4a73e2d38355e4be2c6e94563 /net/sunrpc/xprt.c
parent     dcbbeda836bc748e8fecd753b83d5b345ef8ec31 (diff)
download   linux-918f3c1fe83c5baa4892b943d3f5ac7191d8fb74.tar.xz
SUNRPC: Improve latency for interactive tasks
One of the intentions with the priority queues was to ensure that no single process can hog the transport. The field task->tk_owner therefore identifies the RPC call's origin, and is intended to allow the RPC layer to organise queues for fairness.

This commit therefore modifies the transmit queue to group requests by task->tk_owner, and ensures that we round robin among those groups.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
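The grouping scheme described above can be illustrated outside the kernel. The sketch below is a standalone userspace C program, not part of the patch; struct request, struct owner_group, rr_enqueue() and rr_dequeue() are names invented for this illustration. It keeps one group per owner, appends new requests to their owner's group, and rotates the front group to the back of the queue after each transmission, which is the same round-robin-among-owners behaviour the patch obtains with the rq_xmit/rq_xmit2 list pairs.

/*
 * Illustration only: round-robin transmission among per-owner groups.
 * Not kernel code; all names here are invented for the sketch.
 */
#include <stdio.h>
#include <stdlib.h>

struct request {
        int owner;              /* analogous to task->tk_owner */
        int id;
        struct request *next;   /* next request from the same owner */
};

struct owner_group {
        int owner;
        struct request *head, *tail;  /* this owner's pending requests */
        struct owner_group *next;     /* next group in the main queue */
};

static struct owner_group *queue_head, *queue_tail;

/* Append a request, creating or reusing its owner's group. */
static void rr_enqueue(struct request *req)
{
        struct owner_group *g;

        for (g = queue_head; g; g = g->next)
                if (g->owner == req->owner)
                        break;
        if (!g) {
                g = calloc(1, sizeof(*g));
                g->owner = req->owner;
                if (queue_tail)
                        queue_tail->next = g;
                else
                        queue_head = g;
                queue_tail = g;
        }
        if (g->tail)
                g->tail->next = req;
        else
                g->head = req;
        g->tail = req;
}

/* Take one request from the front group, then rotate that group to the back. */
static struct request *rr_dequeue(void)
{
        struct owner_group *g = queue_head;
        struct request *req;

        if (!g)
                return NULL;
        req = g->head;
        g->head = req->next;
        if (!g->head)
                g->tail = NULL;

        /* unlink the group from the front of the main queue */
        queue_head = g->next;
        if (!queue_head)
                queue_tail = NULL;
        g->next = NULL;

        if (g->head) {          /* owner still has work: requeue at the tail */
                if (queue_tail)
                        queue_tail->next = g;
                else
                        queue_head = g;
                queue_tail = g;
        } else {
                free(g);
        }
        return req;
}

int main(void)
{
        /* Owner 1 floods the queue with three requests; owner 2 submits one. */
        for (int i = 0; i < 3; i++) {
                struct request *r = calloc(1, sizeof(*r));
                r->owner = 1;
                r->id = i;
                rr_enqueue(r);
        }
        struct request *r2 = calloc(1, sizeof(*r2));
        r2->owner = 2;
        rr_enqueue(r2);

        struct request *req;
        while ((req = rr_dequeue())) {
                printf("transmit owner %d request %d\n", req->owner, req->id);
                free(req);
        }
        return 0;
}

With one owner submitting three requests and a second owner submitting one, the dequeue order interleaves the second owner's request right after the first owner's first request instead of leaving it behind all three, which is the fairness property the commit message describes.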
Diffstat (limited to 'net/sunrpc/xprt.c')
-rw-r--r--  net/sunrpc/xprt.c  |  27
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 9c5a8514d264..44d0eeaddaac 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1053,12 +1053,21 @@ xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
 void
 xprt_request_enqueue_transmit(struct rpc_task *task)
 {
-        struct rpc_rqst *req = task->tk_rqstp;
+        struct rpc_rqst *pos, *req = task->tk_rqstp;
         struct rpc_xprt *xprt = req->rq_xprt;
 
         if (xprt_request_need_enqueue_transmit(task, req)) {
                 spin_lock(&xprt->queue_lock);
+                list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
+                        if (pos->rq_task->tk_owner != task->tk_owner)
+                                continue;
+                        list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
+                        INIT_LIST_HEAD(&req->rq_xmit);
+                        goto out;
+                }
                 list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
+                INIT_LIST_HEAD(&req->rq_xmit2);
+out:
                 set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
                 spin_unlock(&xprt->queue_lock);
         }
@@ -1074,8 +1083,20 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 static void
 xprt_request_dequeue_transmit_locked(struct rpc_task *task)
 {
-        if (test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
-                list_del(&task->tk_rqstp->rq_xmit);
+        struct rpc_rqst *req = task->tk_rqstp;
+
+        if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+                return;
+        if (!list_empty(&req->rq_xmit)) {
+                list_del(&req->rq_xmit);
+                if (!list_empty(&req->rq_xmit2)) {
+                        struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
+                                        struct rpc_rqst, rq_xmit2);
+                        list_del(&req->rq_xmit2);
+                        list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
+                }
+        } else
+                list_del(&req->rq_xmit2);
 }
 
/**