author     Jens Axboe <axboe@kernel.dk>  2021-10-18 19:07:09 +0300
committer  Jens Axboe <axboe@kernel.dk>  2021-10-19 14:55:04 +0300
commit     87c037d11b83b93e9ab5eda9fb03c114f67024ff (patch)
tree       82afc0ee91329f8b40060b6b2d4841c29369c53d /block/blk-mq.c
parent     8a7d267b4a2c71a5ff5dd9046abea7117c7d0ac2 (diff)
download   linux-87c037d11b83b93e9ab5eda9fb03c114f67024ff.tar.xz
block: return whether or not to unplug through boolean
Instead of returning the same queue request through a request pointer, use a boolean to accomplish the same.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
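For illustration only, here is a minimal standalone C sketch of the interface change this commit describes: the lookup helper stops handing the matching request back through an out pointer and instead just reports whether one exists, leaving the caller to pull the request off its own plug list. Every name in it (struct plug, plug_has_same_queue_rq(), the toy list layout) is a hypothetical stand-in, not the kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the kernel structures; not the real definitions. */
struct request {
        int tag;
        struct request *next;          /* simplistic singly linked plug list */
};

struct plug {
        struct request *head;          /* most recently plugged request first */
        int rq_count;
};

/*
 * New-style helper, per the commit message: return a boolean instead of
 * filling a struct request ** out parameter.  The caller looks the request
 * up in its own plug list when the helper says one is there.
 */
static bool plug_has_same_queue_rq(const struct plug *plug)
{
        return plug->head != NULL;     /* toy condition, not the real merge check */
}

int main(void)
{
        struct request r1 = { .tag = 42, .next = NULL };
        struct plug plug = { .head = &r1, .rq_count = 1 };
        bool same_queue_rq = plug_has_same_queue_rq(&plug);
        struct request *next_rq = NULL;

        if (same_queue_rq) {
                /* Caller fetches the last plugged request itself. */
                next_rq = plug.head;
                plug.head = next_rq->next;
                plug.rq_count--;
        }

        if (next_rq)
                printf("issuing plugged request with tag %d directly\n", next_rq->tag);
        return 0;
}

Compiled with cc -Wall, the sketch prints the tag of the request the caller dug out itself; the point is only the calling convention, which mirrors the bool same_queue_rq / list_last_entry() pairing in the diff below.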
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 59809ec24303..335ec3a7eab7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2432,7 +2432,7 @@ void blk_mq_submit_bio(struct bio *bio)
const int is_flush_fua = op_is_flush(bio->bi_opf);
struct request *rq;
struct blk_plug *plug;
- struct request *same_queue_rq = NULL;
+ bool same_queue_rq = false;
unsigned int nr_segs = 1;
blk_status_t ret;
@@ -2525,6 +2525,8 @@ void blk_mq_submit_bio(struct bio *bio)
/* Insert the request at the IO scheduler queue */
blk_mq_sched_insert_request(rq, false, true, true);
} else if (plug && !blk_queue_nomerges(q)) {
+ struct request *next_rq = NULL;
+
/*
* We do limited plugging. If the bio can be merged, do that.
* Otherwise the existing request in the plug list will be
@@ -2532,19 +2534,19 @@ void blk_mq_submit_bio(struct bio *bio)
* The plug list might get flushed before this. If that happens,
* the plug list is empty, and same_queue_rq is invalid.
*/
- if (list_empty(&plug->mq_list))
- same_queue_rq = NULL;
if (same_queue_rq) {
- list_del_init(&same_queue_rq->queuelist);
+ next_rq = list_last_entry(&plug->mq_list,
+ struct request,
+ queuelist);
+ list_del_init(&next_rq->queuelist);
plug->rq_count--;
}
blk_add_rq_to_plug(plug, rq);
trace_block_plug(q);
- if (same_queue_rq) {
+ if (next_rq) {
trace_block_unplug(q, 1, true);
- blk_mq_try_issue_directly(same_queue_rq->mq_hctx,
- same_queue_rq);
+ blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
}
} else if ((q->nr_hw_queues > 1 && is_sync) ||
!rq->mq_hctx->dispatch_busy) {
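As a reading aid, the '+' and context lines of the two hunks above can be stitched into the post-patch shape of the plug-handling branch in blk_mq_submit_bio(). One comment line (and any blank lines) between the hunks is not shown on this page and is marked as omitted, so this is not a buildable excerpt:

} else if (plug && !blk_queue_nomerges(q)) {
        struct request *next_rq = NULL;

        /*
         * We do limited plugging. If the bio can be merged, do that.
         * Otherwise the existing request in the plug list will be
         * [one comment line not shown between the two hunks]
         * The plug list might get flushed before this. If that happens,
         * the plug list is empty, and same_queue_rq is invalid.
         */
        if (same_queue_rq) {
                next_rq = list_last_entry(&plug->mq_list,
                                          struct request,
                                          queuelist);
                list_del_init(&next_rq->queuelist);
                plug->rq_count--;
        }
        blk_add_rq_to_plug(plug, rq);
        trace_block_plug(q);

        if (next_rq) {
                trace_block_unplug(q, 1, true);
                blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
        }
} else if ((q->nr_hw_queues > 1 && is_sync) ||
           !rq->mq_hctx->dispatch_busy) {

The request pointer can be dropped because, whenever the flag is set, the request to issue is simply the last entry on plug->mq_list; the caller recovers it with list_last_entry() instead of having the merge path pass it back.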