author     Linus Torvalds <torvalds@linux-foundation.org>   2021-10-29 21:10:29 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-10-29 21:10:29 +0300
commit     a379fbbcb88bcf43d886977c6fb91fb43f330b84 (patch)
tree       06ab469e2fa70005acac21ac49215203b3983030 /block
parent     17d50f89410c5f71142657d9a012cbabcaadb134 (diff)
parent     f4aaf1fa8b17e74aa46bf0165c35a1575b239d4e (diff)
download   linux-a379fbbcb88bcf43d886977c6fb91fb43f330b84.tar.xz
Merge tag 'block-5.15-2021-10-29' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - NVMe pull request:
     - fix nvmet-tcp header digest verification (Amit Engel)
     - fix a memory leak in nvmet-tcp when releasing a queue (Maurizio Lombardi)
     - fix nvme-tcp H2CData PDU send accounting again (Sagi Grimberg)
     - fix digest pointer calculation in nvme-tcp and nvmet-tcp (Varun Prakash)
     - fix possible nvme-tcp req->offset corruption (Varun Prakash)

 - Queue drain ordering fix (Ming)

 - Partition check regression for zoned devices (Shin'ichiro)

 - Zone queue restart fix (Naohiro)

* tag 'block-5.15-2021-10-29' of git://git.kernel.dk/linux-block:
  block: Fix partition check for host-aware zoned block devices
  nvmet-tcp: fix header digest verification
  nvmet-tcp: fix data digest pointer calculation
  nvme-tcp: fix data digest pointer calculation
  nvme-tcp: fix possible req->offset corruption
  block: schedule queue restart after BLK_STS_ZONE_RESOURCE
  block: drain queue after disk is removed from sysfs
  nvme-tcp: fix H2CData PDU send accounting (again)
  nvmet-tcp: fix a memory leak when releasing a queue
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c         13
-rw-r--r--   block/blk-settings.c   20
-rw-r--r--   block/genhd.c          22
3 files changed, 40 insertions, 15 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bc026372de43..652a31fc3bb3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1325,6 +1325,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
int errors, queued;
blk_status_t ret = BLK_STS_OK;
LIST_HEAD(zone_list);
+ bool needs_resource = false;
if (list_empty(list))
return false;
@@ -1370,6 +1371,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
queued++;
break;
case BLK_STS_RESOURCE:
+ needs_resource = true;
+ fallthrough;
case BLK_STS_DEV_RESOURCE:
blk_mq_handle_dev_resource(rq, list);
goto out;
@@ -1380,6 +1383,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
* accept.
*/
blk_mq_handle_zone_resource(rq, &zone_list);
+ needs_resource = true;
break;
default:
errors++;
@@ -1406,7 +1410,6 @@ out:
/* For non-shared tags, the RESTART check will suffice */
bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
- bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
if (nr_budgets)
blk_mq_release_budgets(q, list);
@@ -1447,14 +1450,16 @@ out:
* If driver returns BLK_STS_RESOURCE and SCHED_RESTART
* bit is set, run queue after a delay to avoid IO stalls
* that could otherwise occur if the queue is idle. We'll do
- * similar if we couldn't get budget and SCHED_RESTART is set.
+ * similar if we couldn't get budget or couldn't lock a zone
+ * and SCHED_RESTART is set.
*/
needs_restart = blk_mq_sched_needs_restart(hctx);
+ if (prep == PREP_DISPATCH_NO_BUDGET)
+ needs_resource = true;
if (!needs_restart ||
(no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
blk_mq_run_hw_queue(hctx, true);
- else if (needs_restart && (ret == BLK_STS_RESOURCE ||
- no_budget_avail))
+ else if (needs_restart && needs_resource)
blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
blk_mq_update_dispatch_busy(hctx, true);
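
The net effect of the blk-mq.c hunks above is that BLK_STS_RESOURCE, BLK_STS_ZONE_RESOURCE and the no-budget prep result all feed a single needs_resource flag, and that flag (together with the SCHED_RESTART state) decides whether the hardware queue is rerun after BLK_MQ_RESOURCE_DELAY. Below is a minimal standalone sketch of that decision, not kernel code: run_queue()/delay_run_queue() are stand-ins for blk_mq_run_hw_queue()/blk_mq_delay_run_hw_queue(), and the 3 ms delay mirrors the blk-mq constant only for the printout.

#include <stdbool.h>
#include <stdio.h>

#define BLK_MQ_RESOURCE_DELAY 3	/* ms; illustrative, mirrors the blk-mq constant */

/* Stand-ins for blk_mq_run_hw_queue() / blk_mq_delay_run_hw_queue(). */
static void run_queue(void)                    { puts("run hw queue now"); }
static void delay_run_queue(unsigned int ms)   { printf("rerun hw queue in %u ms\n", ms); }

/*
 * needs_resource is set for BLK_STS_RESOURCE, BLK_STS_ZONE_RESOURCE and the
 * no-budget case, so any of them arms the delayed rerun once SCHED_RESTART
 * (needs_restart) is already set.
 */
static void restart_decision(bool needs_restart, bool no_tag,
			     bool dispatch_wait_empty, bool needs_resource)
{
	if (!needs_restart || (no_tag && dispatch_wait_empty))
		run_queue();
	else if (needs_restart && needs_resource)
		delay_run_queue(BLK_MQ_RESOURCE_DELAY);
	/* otherwise the SCHED_RESTART machinery reruns the queue later */
}

int main(void)
{
	/* A zoned write that returned BLK_STS_ZONE_RESOURCE with SCHED_RESTART set: */
	restart_decision(true, false, false, true);
	return 0;
}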
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a7c857ad7d10..b880c70e22e4 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -842,6 +842,24 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
+static bool disk_has_partitions(struct gendisk *disk)
+{
+ unsigned long idx;
+ struct block_device *part;
+ bool ret = false;
+
+ rcu_read_lock();
+ xa_for_each(&disk->part_tbl, idx, part) {
+ if (bdev_is_partition(part)) {
+ ret = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
/**
* blk_queue_set_zoned - configure a disk queue zoned model.
* @disk: the gendisk of the queue to configure
@@ -876,7 +894,7 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
* we do nothing special as far as the block layer is concerned.
*/
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
- !xa_empty(&disk->part_tbl))
+ disk_has_partitions(disk))
model = BLK_ZONED_NONE;
break;
case BLK_ZONED_NONE:
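
The old !xa_empty(&disk->part_tbl) test misfired because the partition xarray also holds the whole-disk block_device (partition number 0), so it is never empty once the disk is set up; disk_has_partitions() instead counts only entries for which bdev_is_partition() is true. The userspace model below illustrates that distinction under those assumptions; struct part and the helpers are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct block_device: only the partition number. */
struct part {
	int partno;	/* 0 means the whole-disk device */
};

/* Models what bdev_is_partition() distinguishes: a non-zero partition number. */
static bool part_is_partition(const struct part *p)
{
	return p->partno != 0;
}

/* The fixed check: ignore the whole-disk entry, look for real partitions. */
static bool disk_has_partitions_model(const struct part *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (part_is_partition(&tbl[i]))
			return true;
	return false;
}

int main(void)
{
	struct part bare[]  = { { 0 } };         /* table is never empty: part0 only */
	struct part split[] = { { 0 }, { 1 } };  /* part0 plus one real partition   */

	printf("bare disk:        has partitions = %d\n",
	       disk_has_partitions_model(bare, 1));
	printf("partitioned disk: has partitions = %d\n",
	       disk_has_partitions_model(split, 2));
	return 0;
}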
diff --git a/block/genhd.c b/block/genhd.c
index b49858550fa6..ab12ae6e636e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -588,16 +588,6 @@ void del_gendisk(struct gendisk *disk)
* Prevent new I/O from crossing bio_queue_enter().
*/
blk_queue_start_drain(q);
- blk_mq_freeze_queue_wait(q);
-
- rq_qos_exit(q);
- blk_sync_queue(q);
- blk_flush_integrity();
- /*
- * Allow using passthrough request again after the queue is torn down.
- */
- blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
- __blk_mq_unfreeze_queue(q, true);
if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -620,6 +610,18 @@ void del_gendisk(struct gendisk *disk)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk));
+
+ blk_mq_freeze_queue_wait(q);
+
+ rq_qos_exit(q);
+ blk_sync_queue(q);
+ blk_flush_integrity();
+ /*
+ * Allow using passthrough request again after the queue is torn down.
+ */
+ blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+ __blk_mq_unfreeze_queue(q, true);
+
}
EXPORT_SYMBOL(del_gendisk);
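
The genhd.c hunks keep blk_queue_start_drain() early in del_gendisk() but move the freeze wait and the remaining queue teardown to after the sysfs cleanup and device_del(), matching the "drain queue after disk is removed from sysfs" fix in this pull. A stub-only sketch of the resulting ordering follows; step() just logs, and the strings name the calls visible in the diff above (not a reimplementation of del_gendisk()).

#include <stdio.h>

/* Logging stand-in: each step names the kernel call taken at that point. */
static void step(const char *what)
{
	printf("del_gendisk: %s\n", what);
}

/* Ordering of del_gendisk() after this patch, per the hunks above. */
static void del_gendisk_order(void)
{
	step("blk_queue_start_drain(q)");                 /* block new I/O early */
	step("sysfs_remove_link(..., \"bdi\") and remaining sysfs cleanup");
	step("device_del(disk_to_dev(disk))");            /* disk gone from sysfs */
	step("blk_mq_freeze_queue_wait(q)");              /* moved after removal */
	step("rq_qos_exit(q)");
	step("blk_sync_queue(q)");
	step("blk_flush_integrity()");
	step("blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q)");
	step("__blk_mq_unfreeze_queue(q, true)");
}

int main(void)
{
	del_gendisk_order();
	return 0;
}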