author     Christoph Hellwig <hch@lst.de>    2014-05-27 22:59:49 +0400
committer  Jens Axboe <axboe@fb.com>         2014-05-28 19:49:25 +0400
commit     793597a6a95675f4f85671cf747c1d92e7dbc295 (patch)
tree       03512fdd3c26e9a7b24890beb84a99b2bc7cd77d /block/blk-mq.c
parent     a3bd77567cae6af700dcd245148befc73fc89a50 (diff)
download   linux-793597a6a95675f4f85671cf747c1d92e7dbc295.tar.xz
blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request
We already do a non-blocking allocation in blk_mq_map_request; there is no
need to repeat it. Just call __blk_mq_alloc_request to wait directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3224888d329a..43f0c8ffa92a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1179,12 +1179,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
 	if (unlikely(!rq)) {
+		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
-		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
-				false);
-		ctx = rq->mq_ctx;
+
+		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
+		rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
+				__GFP_WAIT|GFP_ATOMIC, false);
 	}
 	hctx->queued++;
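
In short, the fast path still tries a GFP_ATOMIC tag allocation; only when that
fails does blk_mq_map_request now retry inline with __GFP_WAIT instead of going
through blk_mq_alloc_request_pinned. A rough user-space sketch of that
try-nowait-then-block pattern is below; the tag pool, helper names and sizes
are invented for illustration and are not kernel interfaces.

/*
 * Toy analogy of the allocation flow after this patch: try a non-blocking
 * grab first (the GFP_ATOMIC attempt), and only on failure fall back to a
 * blocking retry (the __GFP_WAIT attempt). Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4

static bool tag_busy[POOL_SIZE];
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pool_cond = PTHREAD_COND_INITIALIZER;

/* Caller holds pool_lock: scan for a free tag, mark it busy, or return -1. */
static int find_free_tag(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		if (!tag_busy[i]) {
			tag_busy[i] = true;
			return i;
		}
	}
	return -1;
}

/* Non-blocking attempt: plays the role of the GFP_ATOMIC allocation. */
static int get_tag_nowait(void)
{
	pthread_mutex_lock(&pool_lock);
	int tag = find_free_tag();
	pthread_mutex_unlock(&pool_lock);
	return tag;
}

/* Blocking attempt: plays the role of the __GFP_WAIT retry the patch adds. */
static int get_tag_wait(void)
{
	int tag;

	pthread_mutex_lock(&pool_lock);
	while ((tag = find_free_tag()) < 0)
		pthread_cond_wait(&pool_cond, &pool_lock);
	pthread_mutex_unlock(&pool_lock);
	return tag;
}

/* Release a tag and wake one blocked waiter. */
static void put_tag(int tag)
{
	pthread_mutex_lock(&pool_lock);
	tag_busy[tag] = false;
	pthread_cond_signal(&pool_cond);
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	/* Fast path first; a single blocking retry only if it fails, which
	 * mirrors what blk_mq_map_request now does inline. */
	int tag = get_tag_nowait();

	if (tag < 0)
		tag = get_tag_wait();

	printf("got tag %d\n", tag);
	put_tag(tag);
	return 0;
}

The kernel side also calls __blk_mq_run_hw_queue(hctx) before the blocking
retry, presumably so queued requests get dispatched and their tags can be
freed while the caller waits; the toy pool above has no equivalent of that
step.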