author    Jens Axboe <axboe@kernel.dk>  2018-10-30 00:06:13 +0300
committer Jens Axboe <axboe@kernel.dk>  2018-11-07 23:44:59 +0300
commit    ea4f995ee8b8f0578b3319949f2edd5d812fdb0a (patch)
tree      f7516777fbd8b2fc16cf75b846792981c3a07434 /block/blk-mq-sched.c
parent    392546aed22009060911f76b6ea24520e2f8b50f (diff)
download  linux-ea4f995ee8b8f0578b3319949f2edd5d812fdb0a.tar.xz
blk-mq: cache request hardware queue mapping
We call blk_mq_map_queue() a lot, at least two times for each request per IO, sometimes more. Since we now have an indirect call in that function as well, cache the mapping so we don't have to re-call blk_mq_map_queue() for the same request multiple times.

Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
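The pattern is simply to resolve the hardware-queue mapping once, when the request is set up, and store the result in the request so later consumers read a cached pointer instead of repeating the (now indirect) lookup. Below is a minimal, self-contained sketch of that caching pattern in plain C; it is illustrative only, and every name in it (struct req, fake_map_queue(), and so on) is made up for the example rather than taken from the kernel.

/*
 * Sketch of the caching pattern: compute an expensive mapping once at
 * request-init time, store it in the request, and have all later code
 * paths dereference the cached pointer.  Not kernel code.
 */
#include <stdio.h>

struct hw_ctx { int index; };

struct req {
	unsigned int cmd_flags;
	int cpu;
	struct hw_ctx *hctx;	/* cached mapping, set once at init */
};

static struct hw_ctx queues[4];

/* Stand-in for blk_mq_map_queue(): pretend this lookup is costly. */
static struct hw_ctx *fake_map_queue(unsigned int flags, int cpu)
{
	(void)flags;
	return &queues[cpu % 4];
}

static void req_init(struct req *rq, unsigned int flags, int cpu)
{
	rq->cmd_flags = flags;
	rq->cpu = cpu;
	rq->hctx = fake_map_queue(flags, cpu);	/* resolve the mapping once */
}

int main(void)
{
	struct req rq;

	for (int i = 0; i < 4; i++)
		queues[i].index = i;

	req_init(&rq, 0, 6);
	/* Consumers just read the cached pointer, no re-mapping needed. */
	printf("request mapped to hw queue %d\n", rq.hctx->index);
	return 0;
}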
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index bbabc3877d5a..641df3f00632 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -366,9 +366,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx;
-
- hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

/* flush rq in flush machinery need to be dispatched directly */
if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -407,7 +405,7 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
/* For list inserts, requests better be on the same hw queue */
rq = list_first_entry(list, struct request, queuelist);
- hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+ hctx = rq->mq_hctx;
e = hctx->queue->elevator;
if (e && e->type->ops.insert_requests)