From 224e858f215a3d6304f95a92357a1753475ca9cf Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Sat, 29 Oct 2022 09:04:29 +0800
Subject: ublk_drv: return flag of UBLK_F_URING_CMD_COMP_IN_TASK in case of module

UBLK_F_URING_CMD_COMP_IN_TASK needs to be set and returned to userspace
if the ublk driver is built as a module, otherwise userspace may see the
wrong flags.

Fixes: 71f28f3136af ("ublk_drv: add io_uring based userspace block driver")
Signed-off-by: Ming Lei
Reviewed-by: ZiyangZhang
Link: https://lore.kernel.org/r/20221029010432.598367-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe
---
 drivers/block/ublk_drv.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 5afce6ffaadf..6b2f214f0d5c 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1658,6 +1658,9 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
 	 */
 	ub->dev_info.flags &= UBLK_F_ALL;
 
+	if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
+		ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
+
 	/* We are not ready to support zero copy */
 	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
 
--
cgit v1.2.3

From d57c2c6c1145148bb23d68db73de0b52d482d4ba Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Sat, 29 Oct 2022 09:04:30 +0800
Subject: ublk_drv: comment on ublk_driver entry of Kconfig

Add help info for choosing to build ublk_drv as a module or builtin.

Signed-off-by: Ming Lei
Reviewed-by: ZiyangZhang
Link: https://lore.kernel.org/r/20221029010432.598367-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe
---
 drivers/block/Kconfig | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index db1b4b202646..a41145d52de9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -408,6 +408,12 @@ config BLK_DEV_UBLK
 	  definition isn't finalized yet, and might change according to future
 	  requirement, so mark it as experimental now.
 
+	  Say Y if you want better performance: task_work_add() can then be
+	  used in the IO path to replace the io_uring cmd, which would become
+	  shared between the IO tasks and the ubq daemon, and it can handle
+	  batching more effectively; but task_work_add() isn't exported to
+	  modules, so ublk has to be built into the kernel to use it.
+
 source "drivers/block/rnbd/Kconfig"
 
 endif # BLK_DEV
--
cgit v1.2.3
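An aside on what the first fix means for consumers: userspace reads these
feature bits back from the driver, so a modular build that behaves as if
UBLK_F_URING_CMD_COMP_IN_TASK were set without reporting it leaves ublk
servers guessing. A minimal sketch of the userspace-side check follows;
the flag value is assumed from the UAPI header of this era, and fetching
dev_info via UBLK_CMD_GET_DEV_INFO is elided:

#include <stdint.h>
#include <stdio.h>

/* Assumed from include/uapi/linux/ublk_cmd.h of this era. */
#define UBLK_F_URING_CMD_COMP_IN_TASK (1ULL << 1)

/* dev_flags would come back from UBLK_CMD_GET_DEV_INFO (elided here). */
static void report_completion_mode(uint64_t dev_flags)
{
	if (dev_flags & UBLK_F_URING_CMD_COMP_IN_TASK)
		printf("driver completes io_uring cmds via task work\n");
	else
		printf("driver uses task_work_add() (builtin only)\n");
}

int main(void)
{
	/* Before the fix, a modular ublk_drv reported 0 here even though
	 * it behaved as if the flag were set. */
	report_completion_mode(UBLK_F_URING_CMD_COMP_IN_TASK);
	return 0;
}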
From 3ab6e94ca539242247d4f00414a1bde584d001ed Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Sat, 29 Oct 2022 09:04:31 +0800
Subject: ublk_drv: avoid touching io_uring cmd in blk_mq io path

The io_uring cmd is mainly supposed to be used in the ubq daemon context,
so avoid touching it in the ublk io submission context; otherwise this
data becomes shared between the two contexts and performance suffers.

Instead, link each request into one per-queue list, and keep the same
batching policy for the io_uring command, just without touching the ucmd
in the blk-mq io context.

Signed-off-by: Ming Lei
Reviewed-by: ZiyangZhang
Link: https://lore.kernel.org/r/20221029010432.598367-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe
---
 drivers/block/ublk_drv.c | 83 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 53 insertions(+), 30 deletions(-)

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6b2f214f0d5c..3a59271dafe4 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -57,11 +57,14 @@
 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
 
 struct ublk_rq_data {
-	struct callback_head work;
+	union {
+		struct callback_head work;
+		struct llist_node node;
+	};
 };
 
 struct ublk_uring_cmd_pdu {
-	struct request *req;
+	struct ublk_queue *ubq;
 };
 
 /*
@@ -119,6 +122,8 @@ struct ublk_queue {
 	struct task_struct	*ubq_daemon;
 	char *io_cmd_buf;
 
+	struct llist_head	io_cmds;
+
 	unsigned long io_addr;	/* mapped vm address */
 	unsigned int max_io_sz;
 	bool force_abort;
@@ -764,8 +769,12 @@ static inline void __ublk_rq_task_work(struct request *req)
 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
 {
 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+	struct ublk_queue *ubq = pdu->ubq;
+	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+	struct ublk_rq_data *data;
 
-	__ublk_rq_task_work(pdu->req);
+	llist_for_each_entry(data, io_cmds, node)
+		__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
 }
 
 static void ublk_rq_task_work_fn(struct callback_head *work)
@@ -777,17 +786,50 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
 	__ublk_rq_task_work(req);
 }
 
+static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
+{
+	struct ublk_io *io = &ubq->ios[rq->tag];
+
+	/*
+	 * If the check passes, we know that this is a re-issued request aborted
+	 * previously in monitor_work because the ubq_daemon (the cmd's task) is
+	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+	 * because this ioucmd's io_uring context may be freed now if no inflight
+	 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
+	 *
+	 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
+	 * (releasing the tag). Then the request is re-started (allocating the
+	 * tag) and we are here. Since releasing/allocating a tag implies
+	 * smp_mb(), finding UBLK_IO_FLAG_ABORTED guarantees that this is a
+	 * re-issued request aborted previously.
+	 */
+	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
+		struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+		struct ublk_rq_data *data;
+
+		llist_for_each_entry(data, io_cmds, node)
+			__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+	} else {
+		struct io_uring_cmd *cmd = io->cmd;
+		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+		pdu->ubq = ubq;
+		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+	}
+}
+
 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct ublk_queue *ubq = hctx->driver_data;
 	struct request *rq = bd->rq;
+	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
 	blk_status_t res;
 
 	/* fill iod to slot in io cmd buffer */
 	res = ublk_setup_iod(ubq, rq);
 	if (unlikely(res != BLK_STS_OK))
 		return BLK_STS_IOERR;
+
 	/* With recovery feature enabled, force_abort is set in
 	 * ublk_stop_dev() before calling del_gendisk(). We have to
 	 * abort all requeued and new rqs here to let del_gendisk()
@@ -809,36 +851,15 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 	if (ublk_can_use_task_work(ubq)) {
-		struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
 		enum task_work_notify_mode notify_mode = bd->last ?
 					TWA_SIGNAL_NO_IPI : TWA_NONE;
 
 		if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
 			goto fail;
 	} else {
-		struct ublk_io *io = &ubq->ios[rq->tag];
-		struct io_uring_cmd *cmd = io->cmd;
-		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-
-		/*
-		 * If the check pass, we know that this is a re-issued request aborted
-		 * previously in monitor_work because the ubq_daemon(cmd's task) is
-		 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
-		 * because this ioucmd's io_uring context may be freed now if no inflight
-		 * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
-		 *
-		 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
-		 * the tag). Then the request is re-started(allocating the tag) and we are here.
-		 * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
-		 * guarantees that here is a re-issued request aborted previously.
-		 */
-		if ((io->flags & UBLK_IO_FLAG_ABORTED))
-			goto fail;
-
-		pdu->req = rq;
-		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+		if (llist_add(&data->node, &ubq->io_cmds))
+			ublk_submit_cmd(ubq, rq);
 	}
-
 	return BLK_STS_OK;
 }
 
@@ -1168,17 +1189,19 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
 {
 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
 	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
 
 	if (ublk_can_use_task_work(ubq)) {
-		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
 		/* should not fail since we call it just in ubq->ubq_daemon */
 		task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI);
 	} else {
 		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
 
-		pdu->req = req;
-		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+		if (llist_add(&data->node, &ubq->io_cmds)) {
+			pdu->ubq = ubq;
+			io_uring_cmd_complete_in_task(cmd,
+					ublk_rq_task_work_cb);
+		}
 	}
 }
--
cgit v1.2.3
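The batching contract used above is worth spelling out: llist_add()
returns true only for the caller that finds the list empty, and only that
caller schedules the task-work callback; the callback then drains
everything queued since via llist_del_all(). A self-contained C11 model
of that contract (an illustrative sketch, not the kernel implementation):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int tag;
};

static _Atomic(struct node *) io_cmds;

/* Mimics llist_add(): push a node; return nonzero if the list was
 * empty, i.e. this caller is responsible for scheduling the consumer. */
static int llist_add_first(struct node *n)
{
	struct node *head = atomic_load(&io_cmds);

	do {
		n->next = head;
	} while (!atomic_compare_exchange_weak(&io_cmds, &head, n));
	return head == NULL;
}

/* Mimics the task-work callback: grab the whole batch in one shot. */
static void run_batch(void)
{
	struct node *head = atomic_exchange(&io_cmds, NULL);

	for (; head; head = head->next)
		printf("handling rq tag %d\n", head->tag);
}

int main(void)
{
	struct node a = { .tag = 0 }, b = { .tag = 1 }, c = { .tag = 2 };

	if (llist_add_first(&a))
		printf("first add: schedule the task-work callback\n");
	if (!llist_add_first(&b) && !llist_add_first(&c))
		printf("later adds piggy-back on the pending callback\n");
	run_batch();	/* prints tags 2, 1, 0: newest first, like llist */
	return 0;
}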
From fee32f312405726eec6b35b5740c48acda0315e9 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Sat, 29 Oct 2022 09:04:32 +0800
Subject: ublk_drv: add ublk_queue_cmd() for cleanup

Add a ublk_queue_cmd() helper so that both ublk_queue_rq() and
ublk_handle_need_get_data() can reuse it.

Signed-off-by: Ming Lei
Reviewed-by: ZiyangZhang
Link: https://lore.kernel.org/r/20221029010432.598367-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe
---
 drivers/block/ublk_drv.c | 47 +++++++++++++++++++++--------------------------
 1 file changed, 21 insertions(+), 26 deletions(-)

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 3a59271dafe4..f96cb01e9604 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -817,12 +817,28 @@ static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
 	}
 }
 
+static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq,
+		bool last)
+{
+	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+
+	if (ublk_can_use_task_work(ubq)) {
+		enum task_work_notify_mode notify_mode = last ?
+			TWA_SIGNAL_NO_IPI : TWA_NONE;
+
+		if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
+			__ublk_abort_rq(ubq, rq);
+	} else {
+		if (llist_add(&data->node, &ubq->io_cmds))
+			ublk_submit_cmd(ubq, rq);
+	}
+}
+
 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct ublk_queue *ubq = hctx->driver_data;
 	struct request *rq = bd->rq;
-	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
 	blk_status_t res;
 
 	/* fill iod to slot in io cmd buffer */
@@ -845,21 +861,12 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(bd->rq);
 
 	if (unlikely(ubq_daemon_is_dying(ubq))) {
-fail:
 		__ublk_abort_rq(ubq, rq);
 		return BLK_STS_OK;
 	}
 
-	if (ublk_can_use_task_work(ubq)) {
-		enum task_work_notify_mode notify_mode = bd->last ?
-					TWA_SIGNAL_NO_IPI : TWA_NONE;
+	ublk_queue_cmd(ubq, rq, bd->last);
 
-		if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
-			goto fail;
-	} else {
-		if (llist_add(&data->node, &ubq->io_cmds))
-			ublk_submit_cmd(ubq, rq);
-	}
 	return BLK_STS_OK;
 }
 
@@ -1185,24 +1192,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 }
 
 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
-		int tag, struct io_uring_cmd *cmd)
+		int tag)
 {
 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
 	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
-	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
 
-	if (ublk_can_use_task_work(ubq)) {
-		/* should not fail since we call it just in ubq->ubq_daemon */
-		task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI);
-	} else {
-		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-
-		if (llist_add(&data->node, &ubq->io_cmds)) {
-			pdu->ubq = ubq;
-			io_uring_cmd_complete_in_task(cmd,
-					ublk_rq_task_work_cb);
-		}
-	}
+	ublk_queue_cmd(ubq, req, true);
 }
 
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -1290,7 +1285,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 		io->addr = ub_cmd->addr;
 		io->cmd = cmd;
 		io->flags |= UBLK_IO_FLAG_ACTIVE;
-		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag, cmd);
+		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
 		break;
 	default:
 		goto out;
--
cgit v1.2.3
From fa81cbafbf5764ad5053512152345fab37a1fe18 Mon Sep 17 00:00:00 2001
From: Chen Zhongjin
Date: Sat, 29 Oct 2022 15:13:55 +0800
Subject: block: Fix possible memory leak for rq_wb on add_disk failure

kmemleak reported memory leaks in device_add_disk():

kmemleak: 3 new suspected memory leaks

unreferenced object 0xffff88800f420800 (size 512):
  comm "modprobe", pid 4275, jiffies 4295639067 (age 223.512s)
  hex dump (first 32 bytes):
    04 00 00 00 08 00 00 00 01 00 00 00 00 00 00 00  ................
    00 e1 f5 05 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    [<00000000d3662699>] kmalloc_trace+0x26/0x60
    [<00000000edc7aadc>] wbt_init+0x50/0x6f0
    [<0000000069601d16>] wbt_enable_default+0x157/0x1c0
    [<0000000028fc393f>] blk_register_queue+0x2a4/0x420
    [<000000007345a042>] device_add_disk+0x6fd/0xe40
    [<0000000060e6aab0>] nbd_dev_add+0x828/0xbf0 [nbd]
    ...

It is because the memory allocated in wbt_enable_default() is not
released in the device_add_disk() error path.

Normally, this memory is freed in:

del_gendisk()
  rq_qos_exit()
    rqos->ops->exit(rqos);
      wbt_exit()

So rq_qos_exit() is what frees the rq_wb memory allocated by wbt_init().
However, in the error path of device_add_disk(), only
blk_unregister_queue() is called, leaving the rq_wb memory leaked.
Add rq_qos_exit() to the error path to fix it.

Fixes: 83cbce957446 ("block: add error handling for device_add_disk / add_disk")
Signed-off-by: Chen Zhongjin
Reviewed-by: Christoph Hellwig
Link: https://lore.kernel.org/r/20221029071355.35462-1-chenzhongjin@huawei.com
Signed-off-by: Jens Axboe
---
 block/genhd.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/block/genhd.c b/block/genhd.c
index fee90eb98b4a..0f9769db2de8 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -527,6 +527,7 @@ out_unregister_bdi:
 	bdi_unregister(disk->bdi);
 out_unregister_queue:
 	blk_unregister_queue(disk);
+	rq_qos_exit(disk->queue);
 out_put_slave_dir:
 	kobject_put(disk->slave_dir);
 out_put_holder_dir:
--
cgit v1.2.3
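The shape of this fix is the usual unwind rule: anything initialized
before the failure point needs a matching release in the error ladder, in
reverse order. A toy model of the missing release (all names here are
illustrative stand-ins, not the block layer's API):

#include <stdlib.h>

struct queue {
	void *rq_wb;	/* stands in for the wbt state from wbt_init() */
};

static int wbt_init_like(struct queue *q)
{
	q->rq_wb = malloc(512);
	return q->rq_wb ? 0 : -1;
}

static void rq_qos_exit_like(struct queue *q)
{
	free(q->rq_wb);		/* the release the error path was missing */
	q->rq_wb = NULL;
}

static int add_disk_like(struct queue *q, int later_step_fails)
{
	if (wbt_init_like(q))	/* done as part of registering the queue */
		return -1;
	if (later_step_fails)
		goto out_unregister_queue;
	return 0;

out_unregister_queue:
	/* Unregistering alone does not free q->rq_wb; without the explicit
	 * release below this function would leak it, which is exactly what
	 * kmemleak reported above. */
	rq_qos_exit_like(q);
	return -1;
}

int main(void)
{
	struct queue q = { 0 };

	add_disk_like(&q, 1);	/* exercises the unwind path */
	return 0;
}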
From 943f45b9399ed8b2b5190cbc797995edaa97f58f Mon Sep 17 00:00:00 2001
From: Chen Jun
Date: Mon, 31 Oct 2022 03:12:42 +0000
Subject: blk-mq: Fix kmemleak in blk_mq_init_allocated_queue

There is a kmemleak caused by modprobe null_blk.ko:

unreferenced object 0xffff8881acb1f000 (size 1024):
  comm "modprobe", pid 836, jiffies 4294971190 (age 27.068s)
  hex dump (first 32 bytes):
    00 00 00 00 ad 4e ad de ff ff ff ff 00 00 00 00  .....N..........
    ff ff ff ff ff ff ff ff 00 53 99 9e ff ff ff ff  .........S......
  backtrace:
    [<000000004a10c249>] kmalloc_node_trace+0x22/0x60
    [<00000000648f7950>] blk_mq_alloc_and_init_hctx+0x289/0x350
    [<00000000af06de0e>] blk_mq_realloc_hw_ctxs+0x2fe/0x3d0
    [<00000000e00c1872>] blk_mq_init_allocated_queue+0x48c/0x1440
    [<00000000d16b4e68>] __blk_mq_alloc_disk+0xc8/0x1c0
    [<00000000d10c98c3>] 0xffffffffc450d69d
    [<00000000b9299f48>] 0xffffffffc4538392
    [<0000000061c39ed6>] do_one_initcall+0xd0/0x4f0
    [<00000000b389383b>] do_init_module+0x1a4/0x680
    [<0000000087cf3542>] load_module+0x6249/0x7110
    [<00000000beba61b8>] __do_sys_finit_module+0x140/0x200
    [<00000000fdcfff51>] do_syscall_64+0x35/0x80
    [<000000003c0f1f71>] entry_SYSCALL_64_after_hwframe+0x46/0xb0

That is because q->mq_ops is set to NULL before blk_release_queue is
called:

blk_mq_init_queue_data
  blk_mq_init_allocated_queue
    blk_mq_realloc_hw_ctxs
      for (i = 0; i < set->nr_hw_queues; i++) {
        old_hctx = xa_load(&q->hctx_table, i);
        if (!blk_mq_alloc_and_init_hctx(.., i, ..))	[1]
          if (!old_hctx)
            break;

      xa_for_each_start(&q->hctx_table, j, hctx, j)
        blk_mq_exit_hctx(q, set, hctx, j);		[2]

    if (!q->nr_hw_queues)				[3]
      goto err_hctxs;

  err_exit:
    q->mq_ops = NULL;					[4]

blk_put_queue
  blk_release_queue
    if (queue_is_mq(q))					[5]
      blk_mq_release(q);

[1]: blk_mq_alloc_and_init_hctx() failed at i != 0.
[2]: The hctxs allocated by [1] are moved to q->unused_hctx_list and
     will be cleaned up in blk_mq_release().
[3]: q->nr_hw_queues is 0.
[4]: q->mq_ops is set to NULL.
[5]: queue_is_mq() returns false due to [4], so blk_mq_release() is
     never called and the hctxs on q->unused_hctx_list are leaked.

To fix it, call blk_mq_release() in the error path.

Fixes: 2f8f1336a48b ("blk-mq: always free hctx after request queue is freed")
Signed-off-by: Yuan Can
Signed-off-by: Chen Jun
Reviewed-by: Ming Lei
Link: https://lore.kernel.org/r/20221031031242.94107-1-chenjun102@huawei.com
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 75c8296b6feb..21cc7c2da0f9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4193,9 +4193,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
-	xa_destroy(&q->hctx_table);
-	q->nr_hw_queues = 0;
-	blk_mq_sysfs_deinit(q);
+	blk_mq_release(q);
 err_poll:
 	blk_stat_free_callback(q->poll_cb);
 	q->poll_cb = NULL;
--
cgit v1.2.3
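The root cause above generalizes: a teardown that is guarded by a
predicate must not run after the state feeding that predicate has been
cleared. A toy model of that ordering bug and of the fix (illustrative
names only, not the block layer's code):

#include <stdlib.h>

struct queue {
	const void *mq_ops;	/* cleared on the error path */
	void *unused_hctxs;	/* freed only by the mq-specific release */
};

static void blk_mq_release_like(struct queue *q)
{
	free(q->unused_hctxs);
	q->unused_hctxs = NULL;
}

static void blk_release_queue_like(struct queue *q)
{
	if (q->mq_ops)			/* queue_is_mq() stand-in */
		blk_mq_release_like(q);	/* skipped once mq_ops is NULL */
}

int main(void)
{
	struct queue q = { .mq_ops = (void *)1,
			   .unused_hctxs = malloc(64) };

	/* Buggy order: clear the predicate first, then release. */
	q.mq_ops = NULL;
	blk_release_queue_like(&q);	/* does nothing: unused_hctxs leaks */

	/* The fix releases the mq resources in the error path itself,
	 * before mq_ops is cleared. */
	blk_mq_release_like(&q);
	return 0;
}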
From 878eb6e48f240d02ed1c9298020a0b6370695f24 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 1 Nov 2022 00:54:13 +0000
Subject: block: blk_add_rq_to_plug(): clear stale 'last' after flush

blk_mq_flush_plug_list() empties ->mq_list, so the request we'd peeked
there before that call is gone; in any case, we are no longer dealing
with a mix of requests for different queues - there are no requests
left in the plug.

Signed-off-by: Al Viro
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 21cc7c2da0f9..6a789cda68a5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1262,6 +1262,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 		    (!blk_queue_nomerges(rq->q) &&
 		     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
 			blk_mq_flush_plug_list(plug, false);
+			last = NULL;
 			trace_block_plug(rq->q);
 		}
 
--
cgit v1.2.3
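To see why the one-line reset matters, here is a toy model of the
plugging logic (a loose paraphrase, not the kernel code): after the flush
empties the list, the peeked pointer refers to a request that no longer
exists, so the later multiple-queues check must not touch it.

struct request {
	struct request *rq_next;
	const void *q;		/* owning queue */
	unsigned int bytes;
};

struct blk_plug {
	struct request *mq_list;
	int multiple_queues;
};

#define PLUG_FLUSH_SIZE (128u * 1024u)

static void flush_plug(struct blk_plug *plug)
{
	plug->mq_list = NULL;	/* requests are issued and may be freed */
}

static void add_rq_to_plug_model(struct blk_plug *plug, struct request *rq)
{
	struct request *last = plug->mq_list;	/* peek before flushing */

	if (last && last->bytes >= PLUG_FLUSH_SIZE) {
		flush_plug(plug);
		last = NULL;	/* the fix: the peeked request is gone */
	}

	/* Without the reset, this would dereference a completed request
	 * and could wrongly mark the plug as holding requests for
	 * several queues. */
	if (last && last->q != rq->q)
		plug->multiple_queues = 1;

	rq->rq_next = plug->mq_list;
	plug->mq_list = rq;
}

int main(void)
{
	struct blk_plug plug = { 0 };
	struct request r1 = { .q = "qa", .bytes = PLUG_FLUSH_SIZE };
	struct request r2 = { .q = "qb", .bytes = 4096 };

	add_rq_to_plug_model(&plug, &r1);
	add_rq_to_plug_model(&plug, &r2);	/* triggers the flush path */
	return plug.multiple_queues;		/* 0 with the fix applied */
}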