author    Jens Axboe <axboe@kernel.dk>  2021-11-03 14:47:09 +0300
committer Jens Axboe <axboe@kernel.dk>  2021-11-05 08:20:10 +0300
commit    900e080752025f0016128f07c9ed4c50eba3654b (patch)
tree      0429d43fed6fcb41ab394987d423bc08a042e730 /block/blk-mq.c
parent    c98cb5bbdab10d187aff9b4e386210eb2332af96 (diff)
download  linux-900e080752025f0016128f07c9ed4c50eba3654b.tar.xz
block: move queue enter logic into blk_mq_submit_bio()
Retain the old logic for the fops-based submit, but for our internal blk_mq_submit_bio(), move the queue entering logic into the core function itself.

We need to be a bit careful if going into the scheduler, as a scheduler or queue mappings can arbitrarily change before we have entered the queue. Have the bio scheduler mapping do that separately; it's a very cheap operation compared to actually doing the merging, locking, and lookups.

Reviewed-by: Christoph Hellwig <hch@lst.de>
[axboe: update to check merge post submit_bio_checks() doing remap...]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
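As a rough, standalone illustration of the flow this patch establishes: the allocation helper takes the queue reference up front, runs the submission checks and the merge attempt under that reference, and drops it on every early exit via a single put_exit label. The types and helpers below (struct queue, queue_enter(), queue_exit(), submit_checks(), attempt_merge(), get_new_request()) are simplified stand-ins for bio_queue_enter(), blk_queue_exit(), submit_bio_checks(), blk_attempt_bio_merge() and blk_mq_get_new_requests(); this is a sketch of the pattern, not the kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the request queue, bio and request. */
struct queue   { int usage; bool frozen; };
struct bio     { int size; };
struct request { struct bio *bio; };

/* Stand-in for bio_queue_enter(): take a usage reference, fail if frozen. */
static int queue_enter(struct queue *q)
{
	if (q->frozen)
		return -1;
	q->usage++;
	return 0;
}

/* Stand-in for blk_queue_exit(): drop the usage reference. */
static void queue_exit(struct queue *q)
{
	q->usage--;
}

/* Stand-ins for submit_bio_checks() and blk_attempt_bio_merge(). */
static bool submit_checks(struct bio *bio)
{
	return bio->size > 0;
}

static bool attempt_merge(struct queue *q, struct bio *bio)
{
	(void)q;
	return bio->size < 8;	/* pretend small bios merge into an existing request */
}

/*
 * Pattern of the reworked blk_mq_get_new_requests(): the helper owns the
 * queue reference, so every failure after queue_enter() funnels through
 * put_exit to drop it again, and callers just see NULL.
 */
static struct request *get_new_request(struct queue *q, struct bio *bio)
{
	struct request *rq;

	if (queue_enter(q))
		return NULL;
	if (!submit_checks(bio))
		goto put_exit;
	if (attempt_merge(q, bio))
		goto put_exit;

	rq = malloc(sizeof(*rq));
	if (!rq)
		goto put_exit;
	rq->bio = bio;
	return rq;		/* reference dropped later, when the request completes */

put_exit:
	queue_exit(q);
	return NULL;
}

int main(void)
{
	struct queue q = { .usage = 0, .frozen = false };
	struct bio bio = { .size = 16 };
	struct request *rq = get_new_request(&q, &bio);

	printf("rq=%p usage=%d\n", (void *)rq, q.usage);
	if (rq)
		queue_exit(&q);	/* completion path drops the reference */
	free(rq);
	return 0;
}

Once the helper owns the enter/exit pairing this way, the caller no longer needs its own error label to drop the reference, which is what the final hunk below removes from blk_mq_submit_bio().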
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--   block/blk-mq.c   60

1 file changed, 41 insertions, 19 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index dcb413297a96..5fe40c85a308 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2478,9 +2478,23 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
 	return BLK_MAX_REQUEST_COUNT;
 }
 
+static bool blk_attempt_bio_merge(struct request_queue *q, struct bio *bio,
+				  unsigned int nr_segs, bool *same_queue_rq)
+{
+	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
+		if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
+			return true;
+		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
+			return true;
+	}
+	return false;
+}
+
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
 					       struct blk_plug *plug,
-					       struct bio *bio)
+					       struct bio *bio,
+					       unsigned int nsegs,
+					       bool *same_queue_rq)
 {
 	struct blk_mq_alloc_data data = {
 		.q		= q,
@@ -2489,6 +2503,15 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
+	if (unlikely(bio_queue_enter(bio)))
+		return NULL;
+	if (unlikely(!submit_bio_checks(bio)))
+		goto put_exit;
+	if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+		goto put_exit;
+
+	rq_qos_throttle(q, bio);
+
 	if (plug) {
 		data.nr_tags = plug->nr_ios;
 		plug->nr_ios = 1;
@@ -2502,25 +2525,34 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
+put_exit:
+	blk_queue_exit(q);
 	return NULL;
 }
 
 static inline struct request *blk_mq_get_request(struct request_queue *q,
 						 struct blk_plug *plug,
-						 struct bio *bio)
+						 struct bio *bio,
+						 unsigned int nsegs,
+						 bool *same_queue_rq)
 {
 	if (plug) {
 		struct request *rq;
 
 		rq = rq_list_peek(&plug->cached_rq);
 		if (rq) {
+			if (unlikely(!submit_bio_checks(bio)))
+				return NULL;
+			if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+				return NULL;
 			plug->cached_rq = rq_list_next(rq);
 			INIT_LIST_HEAD(&rq->queuelist);
+			rq_qos_throttle(q, bio);
 			return rq;
 		}
 	}
 
-	return blk_mq_get_new_requests(q, plug, bio);
+	return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
 }
 
 /**
@@ -2546,26 +2578,20 @@ void blk_mq_submit_bio(struct bio *bio)
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
+	if (unlikely(!blk_crypto_bio_prep(&bio)))
+		return;
+
 	blk_queue_bounce(q, &bio);
 	if (blk_may_split(q, bio))
 		__blk_queue_split(q, &bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
-		goto queue_exit;
-
-	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
-		if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-			goto queue_exit;
-		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-			goto queue_exit;
-	}
-
-	rq_qos_throttle(q, bio);
+		return;
 
 	plug = blk_mq_plug(q, bio);
-	rq = blk_mq_get_request(q, plug, bio);
+	rq = blk_mq_get_request(q, plug, bio, nr_segs, &same_queue_rq);
 	if (unlikely(!rq))
-		goto queue_exit;
+		return;
 
 	trace_block_getrq(bio);
 
@@ -2646,10 +2672,6 @@ void blk_mq_submit_bio(struct bio *bio)
 		/* Default case. */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	}
-
-	return;
-queue_exit:
-	blk_queue_exit(q);
 }
 
 static size_t order_to_size(unsigned int order)