author     Li Lingfeng <lilingfeng3@huawei.com>    2023-08-10 06:51:11 +0300
committer  Jens Axboe <axboe@kernel.dk>            2023-08-10 16:20:31 +0300
commit     4eb44d10766ac0fae5973998fd2a0103df1d3fe1 (patch)
tree       2580c861923412ef148897924352d347cdec321a /block
parent     c8659bbb15cd42577a9b16a23b527436b028c8b2 (diff)
block: remove init_mutex and open-code blk_iolatency_try_init
Commit a13696b83da4 ("blk-iolatency: Make initialization lazy") added a mutex named "init_mutex" to blk_iolatency_try_init() to close the race when initializing RQ_QOS_LATENCY.

Since then, commit a13bd91be223 ("block/rq_qos: protect rq_qos apis with a new lock") added a new lock to struct request_queue, and that lock is already held in blkg_conf_open_bdev() before blk_iolatency_init() is called. init_mutex is therefore no longer needed in blk_iolatency_try_init(); remove it.

With init_mutex gone, blk_iolatency_try_init() can be open-coded back into iolatency_set_limit(), like ioc_qos_write().

Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
Reviewed-by: Michal Koutný <mkoutny@suse.com>
Link: https://lore.kernel.org/r/20230810035111.2236335-1-lilingfeng@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
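For reference, a minimal sketch of the open-coded sequence and the lock that now covers it (illustrative only, not verbatim kernel code; the unlock site in blkg_conf_exit() is inferred from commit a13bd91be223 rather than stated in this message):

	struct blkg_conf_ctx ctx;
	int ret;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_open_bdev(&ctx);	/* returns with ctx.bdev->bd_queue->rq_qos_mutex held on success */
	if (ret)
		goto out;

	if (!iolat_rq_qos(ctx.bdev->bd_queue))			/* test ... */
		ret = blk_iolatency_init(ctx.bdev->bd_disk);	/* ... and init, both under rq_qos_mutex */

	/* ... rest of iolatency_set_limit(): parse buf and apply the new limit ... */
out:
	blkg_conf_exit(&ctx);	/* assumed to drop rq_qos_mutex and release the bdev */
	return ret ?: nbytes;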
Diffstat (limited to 'block')
-rw-r--r--  block/blk-iolatency.c  35
1 files changed, 11 insertions, 24 deletions
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index fd5fec989e39..c16aef4be036 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -824,29 +824,6 @@ static void iolatency_clear_scaling(struct blkcg_gq *blkg)
 	}
 }
 
-static int blk_iolatency_try_init(struct blkg_conf_ctx *ctx)
-{
-	static DEFINE_MUTEX(init_mutex);
-	int ret;
-
-	ret = blkg_conf_open_bdev(ctx);
-	if (ret)
-		return ret;
-
-	/*
-	 * blk_iolatency_init() may fail after rq_qos_add() succeeds which can
-	 * confuse iolat_rq_qos() test. Make the test and init atomic.
-	 */
-	mutex_lock(&init_mutex);
-
-	if (!iolat_rq_qos(ctx->bdev->bd_queue))
-		ret = blk_iolatency_init(ctx->bdev->bd_disk);
-
-	mutex_unlock(&init_mutex);
-
-	return ret;
-}
-
 static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
 				   size_t nbytes, loff_t off)
 {
@@ -861,7 +838,17 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
 	blkg_conf_init(&ctx, buf);
 
-	ret = blk_iolatency_try_init(&ctx);
+	ret = blkg_conf_open_bdev(&ctx);
+	if (ret)
+		goto out;
+
+	/*
+	 * blk_iolatency_init() may fail after rq_qos_add() succeeds which can
+	 * confuse iolat_rq_qos() test. Make the test and init atomic.
+	 */
+	lockdep_assert_held(&ctx.bdev->bd_queue->rq_qos_mutex);
+	if (!iolat_rq_qos(ctx.bdev->bd_queue))
+		ret = blk_iolatency_init(ctx.bdev->bd_disk);
 	if (ret)
 		goto out;