author     Omar Sandoval <osandov@fb.com>  2017-04-14 11:00:01 +0300
committer  Jens Axboe <axboe@fb.com>       2017-04-14 23:06:57 +0300
commit     c05f8525f67b7d6489b0502211d4ed35622d9beb (patch)
tree       fe474e51721e497374bbf6e674f883e54138e699 /block
parent     5b72727299307e53888277729f980ab03264dac8 (diff)
blk-mq-sched: make completed_request() callback more useful
Currently, this callback is called right after put_request() and has no
distinguishable purpose. Instead, let's call it before put_request() as soon
as I/O has completed on the request, before we account it in blk-stat. With
this, Kyber can enable stats when it sees a latency outlier and make sure the
outlier gets accounted.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
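To make the new hook shape concrete, here is a minimal sketch of what an mq
elevator's completed_request implementation could look like after this patch.
The scheduler type and its fields (my_sched_data, issue_ns, latency_target_ns,
stats_enabled) are hypothetical illustrations for this sketch, not Kyber's
actual code:

/*
 * Hypothetical mq elevator hook using the post-patch signature.
 * Because the hook now runs as soon as I/O completes, before the
 * request is freed and before blk-stat accounting, the request is
 * still valid here and the scheduler can react to its completion
 * latency in time for this very sample to be accounted.
 */
static void my_sched_completed_request(struct request *rq)
{
        struct my_sched_data *sd = rq->q->elevator->elevator_data;
        u64 latency_ns = ktime_get_ns() - sd->issue_ns[rq->internal_tag];

        /* Latency outlier: enable stats so this completion is counted. */
        if (latency_ns > sd->latency_target_ns)
                WRITE_ONCE(sd->stats_enabled, true);
}

static struct elevator_type my_sched = {
        .ops.mq = {
                .completed_request = my_sched_completed_request,
        },
        .elevator_name = "my-sched",
};

The reordering itself is visible in the hunks below: the call moves out of the
tag-freeing path in __blk_mq_finish_request() and into
__blk_mq_complete_request(), ahead of blk_mq_stat_add().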
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-sched.h  11
-rw-r--r--  block/blk-mq.c         5
2 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index f4bc186c3440..120c6abc37cc 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -82,17 +82,12 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
         return true;
 }
 
-static inline void
-blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static inline void blk_mq_sched_completed_request(struct request *rq)
 {
-        struct elevator_queue *e = hctx->queue->elevator;
+        struct elevator_queue *e = rq->q->elevator;
 
         if (e && e->type->ops.mq.completed_request)
-                e->type->ops.mq.completed_request(hctx, rq);
-
-        BUG_ON(rq->internal_tag == -1);
-
-        blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
+                e->type->ops.mq.completed_request(rq);
 }
 
 static inline void blk_mq_sched_started_request(struct request *rq)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7138cd98146e..e2ef7b460924 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -350,7 +350,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
         if (rq->tag != -1)
                 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
         if (sched_tag != -1)
-                blk_mq_sched_completed_request(hctx, rq);
+                blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
         blk_mq_sched_restart(hctx);
         blk_queue_exit(q);
 }
@@ -444,6 +444,9 @@ static void __blk_mq_complete_request(struct request *rq)
 {
         struct request_queue *q = rq->q;
 
+        if (rq->internal_tag != -1)
+                blk_mq_sched_completed_request(rq);
+
         blk_mq_stat_add(rq);
 
         if (!q->softirq_done_fn)