author     Ming Lei <ming.lei@redhat.com>    2022-01-04 16:42:23 +0300
committer  Jens Axboe <axboe@kernel.dk>      2022-01-10 04:54:52 +0300
commit     9d497e2941c30a060ba62d5485b3bc9d91ffb09e (patch)
tree       f5e954af14ed82e4023395d58c0c0073624da116 /block
parent     292c33c95defd0b814fec1fc8cd60d16556cf7b8 (diff)
download   linux-9d497e2941c30a060ba62d5485b3bc9d91ffb09e.tar.xz
block: don't protect submit_bio_checks by q_usage_counter
Commit cc9c884dd7f4 ("block: call submit_bio_checks under q_usage_counter") uses q_usage_counter to protect submit_bio_checks(), so that no IO can be submitted after the disk is deleted by del_gendisk(). It turns out the protection isn't necessary, because once blk_mq_freeze_queue_wait() in del_gendisk() returns:

1) all in-flight IO has completed

2) all new IO will be failed in __bio_queue_enter(), because q_usage_counter is dead and GD_DEAD is set

3) both the disk and the request queue instance are safe, since the caller of submit_bio() guarantees that the disk can't be closed

Now that submit_bio_checks() no longer needs the protection of q_usage_counter, we can move it before calling blk_mq_submit_bio() and ->submit_bio(). With this change, we no longer need to throttle the queue while holding an allocated request, so a precious driver tag or request is not wasted on throttling. Meanwhile, the bio checks are unified for both bio-based and request-based drivers.

Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220104134223.590803-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
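For orientation, here is a condensed sketch of the reordered submission path this patch produces, assembled from the blk-core.c hunk below. The final else branch is not visible in the hunk and is assumed from the surrounding blk-core.c context; the comments are added here for illustration only.

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	/* the checks now run first, without holding q_usage_counter */
	if (unlikely(!submit_bio_checks(bio)))
		return;

	if (!disk->fops->submit_bio)
		blk_mq_submit_bio(bio);		/* request-based (blk-mq) path */
	else
		__submit_bio_fops(disk, bio);	/* bio-based driver path */
}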
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  14
-rw-r--r--  block/blk-mq.c    39
2 files changed, 22 insertions(+), 31 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 10619fd83c1b..97f8bc8d3a79 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -787,17 +787,21 @@ end_io:
static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
- if (unlikely(bio_queue_enter(bio) != 0))
- return;
- if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
- disk->fops->submit_bio(bio);
- blk_queue_exit(disk->queue);
+ if (blk_crypto_bio_prep(&bio)) {
+ if (likely(bio_queue_enter(bio) == 0)) {
+ disk->fops->submit_bio(bio);
+ blk_queue_exit(disk->queue);
+ }
+ }
}
static void __submit_bio(struct bio *bio)
{
struct gendisk *disk = bio->bi_bdev->bd_disk;
+ if (unlikely(!submit_bio_checks(bio)))
+ return;
+
if (!disk->fops->submit_bio)
blk_mq_submit_bio(bio);
else
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0d7c9d3e0329..a6d4780580fc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2714,26 +2714,18 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
static struct request *blk_mq_get_new_requests(struct request_queue *q,
struct blk_plug *plug,
- struct bio *bio,
- unsigned int nsegs)
+ struct bio *bio)
{
struct blk_mq_alloc_data data = {
.q = q,
.nr_tags = 1,
+ .cmd_flags = bio->bi_opf,
};
struct request *rq;
if (unlikely(bio_queue_enter(bio)))
return NULL;
- if (unlikely(!submit_bio_checks(bio)))
- goto queue_exit;
- if (blk_mq_attempt_bio_merge(q, bio, nsegs))
- goto queue_exit;
- rq_qos_throttle(q, bio);
-
- /* ->bi_opf is finalized after submit_bio_checks() returns */
- data.cmd_flags = bio->bi_opf;
if (plug) {
data.nr_tags = plug->nr_ios;
plug->nr_ios = 1;
@@ -2746,13 +2738,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
-queue_exit:
blk_queue_exit(q);
return NULL;
}
static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
- struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+ struct blk_plug *plug, struct bio *bio)
{
struct request *rq;
@@ -2762,21 +2753,14 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
if (!rq || rq->q != q)
return NULL;
- if (unlikely(!submit_bio_checks(*bio)))
- return NULL;
- if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
- *bio = NULL;
+ if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
return NULL;
- }
- if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
- return NULL;
- if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+ if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
return NULL;
- rq->cmd_flags = (*bio)->bi_opf;
+ rq->cmd_flags = bio->bi_opf;
plug->cached_rq = rq_list_next(rq);
INIT_LIST_HEAD(&rq->queuelist);
- rq_qos_throttle(q, *bio);
return rq;
}
@@ -2812,11 +2796,14 @@ void blk_mq_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
return;
- rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+ if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+ return;
+
+ rq_qos_throttle(q, bio);
+
+ rq = blk_mq_get_cached_request(q, plug, bio);
if (!rq) {
- if (!bio)
- return;
- rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+ rq = blk_mq_get_new_requests(q, plug, bio);
if (unlikely(!rq))
return;
}
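Put together, the front end of blk_mq_submit_bio() after this patch reads roughly as below (condensed from the hunk above; the declarations, plug handling and nr_segs computation are omitted). The point of the reordering is that blk_mq_attempt_bio_merge() and rq_qos_throttle() now run exactly once, before either a cached or a newly allocated request is taken, so a throttled bio no longer pins a request or driver tag.

	if (!bio_integrity_prep(bio))
		return;

	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
		return;

	/* throttling happens before any request is allocated or claimed */
	rq_qos_throttle(q, bio);

	rq = blk_mq_get_cached_request(q, plug, bio);
	if (!rq) {
		rq = blk_mq_get_new_requests(q, plug, bio);
		if (unlikely(!rq))
			return;
	}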