author      John Garry <john.garry@huawei.com>      2020-08-19 18:20:27 +0300
committer   Jens Axboe <axboe@kernel.dk>            2020-09-04 00:20:47 +0300
commit      f1b49fdc1c64db110aa1315831e5fe0f8599fa56 (patch)
tree        3326f32212adf701cd0b0882f1e0193ac22dad1a /block
parent      bccf5e26d99c28980bd6ced474422a1b18402263 (diff)
download    linux-f1b49fdc1c64db110aa1315831e5fe0f8599fa56.tar.xz
blk-mq: Record active_queues_shared_sbitmap per tag_set for when using shared sbitmap
When a shared sbitmap is used, the number of active request queues per hctx should no longer be relied on when judging how to share the tag bitmap. Instead, maintain the number of active request queues per tag_set, and base the judgement on that.

Originally-from: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Don Brace <don.brace@microsemi.com> #SCSI resv cmds patches used
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
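The accounting change can be modelled in userspace: with a shared sbitmap, the "active queues" counter lives in the tag set, and each request queue contributes to it at most once, instead of each hctx bumping its own tags->active_queues. Below is a minimal standalone C sketch of that idea; the model_* names are illustrative stand-ins, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for blk_mq_tag_set and request_queue. */
struct model_tag_set {
        atomic_int active_queues_shared_sbitmap;        /* per-set counter */
};

struct model_queue {
        struct model_tag_set *set;
        atomic_bool hctx_active;        /* models QUEUE_FLAG_HCTX_ACTIVE */
};

/* Mark a queue active; only the first call per queue bumps the set-wide count. */
static void model_tag_busy(struct model_queue *q)
{
        bool expected = false;

        if (atomic_compare_exchange_strong(&q->hctx_active, &expected, true))
                atomic_fetch_add(&q->set->active_queues_shared_sbitmap, 1);
}

/* Mark a queue idle again, dropping the set-wide count once. */
static void model_tag_idle(struct model_queue *q)
{
        bool expected = true;

        if (atomic_compare_exchange_strong(&q->hctx_active, &expected, false))
                atomic_fetch_sub(&q->set->active_queues_shared_sbitmap, 1);
}

int main(void)
{
        struct model_tag_set set = { .active_queues_shared_sbitmap = 0 };
        struct model_queue q0 = { .set = &set }, q1 = { .set = &set };

        model_tag_busy(&q0);
        model_tag_busy(&q0);    /* second call does not bump the counter again */
        model_tag_busy(&q1);
        printf("active queues: %d\n",
               atomic_load(&set.active_queues_shared_sbitmap));        /* 2 */

        model_tag_idle(&q1);
        printf("active queues: %d\n",
               atomic_load(&set.active_queues_shared_sbitmap));        /* 1 */
        return 0;
}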
Diffstat (limited to 'block')
-rw-r--r--      block/blk-mq-tag.c      33
-rw-r--r--      block/blk-mq.c          2
-rw-r--r--      block/blk-mq.h          16
3 files changed, 40 insertions, 11 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c6d7ebc62bdb..c31c4a0478a5 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -23,9 +23,18 @@
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
-        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
-            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-                atomic_inc(&hctx->tags->active_queues);
+        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+                struct request_queue *q = hctx->queue;
+                struct blk_mq_tag_set *set = q->tag_set;
+
+                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
+                    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+                        atomic_inc(&set->active_queues_shared_sbitmap);
+        } else {
+                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+                    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                        atomic_inc(&hctx->tags->active_queues);
+        }
 
         return true;
 }
@@ -47,11 +56,19 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
         struct blk_mq_tags *tags = hctx->tags;
-
-        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-                return;
-
-        atomic_dec(&tags->active_queues);
+        struct request_queue *q = hctx->queue;
+        struct blk_mq_tag_set *set = q->tag_set;
+
+        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+                if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
+                                        &q->queue_flags))
+                        return;
+                atomic_dec(&set->active_queues_shared_sbitmap);
+        } else {
+                if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                        return;
+                atomic_dec(&tags->active_queues);
+        }
 
         blk_mq_tag_wakeup_all(tags, false);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ffc5ad0c91b7..eff9d987f85b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3442,6 +3442,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                 goto out_free_mq_map;
 
         if (blk_mq_is_sbitmap_shared(set->flags)) {
+                atomic_set(&set->active_queues_shared_sbitmap, 0);
+
                 if (blk_mq_init_shared_sbitmap(set, set->flags)) {
                         ret = -ENOMEM;
                         goto out_free_mq_rq_maps;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 25ec73078e95..a52703c98b77 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -292,8 +292,6 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
         if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                 return true;
-        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-                return true;
 
         /*
          * Don't try dividing an ant
@@ -301,7 +299,19 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
         if (bt->sb.depth == 1)
                 return true;
 
-        users = atomic_read(&hctx->tags->active_queues);
+        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+                struct request_queue *q = hctx->queue;
+                struct blk_mq_tag_set *set = q->tag_set;
+
+                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+                        return true;
+                users = atomic_read(&set->active_queues_shared_sbitmap);
+        } else {
+                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                        return true;
+                users = atomic_read(&hctx->tags->active_queues);
+        }
+
         if (!users)
                 return true;
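The 'users' value computed above feeds the fair-share check that the rest of hctx_may_queue performs outside this hunk: the bitmap depth is divided among the active queues, with each queue allowed at least a few tags. Below is a standalone C sketch of that calculation, with hypothetical names and assuming a minimum share of four tags.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical standalone version of the fair-share check that the
 * 'users' count feeds into: give each active queue roughly
 * depth / users tags, but never fewer than four.
 */
static bool may_queue(unsigned int sbitmap_depth, unsigned int users,
                      unsigned int active_requests)
{
        unsigned int depth;

        if (!users)
                return true;    /* no queue marked active: no limit applies */

        /* Ceiling division, with a floor of four tags per active queue. */
        depth = (sbitmap_depth + users - 1) / users;
        if (depth < 4)
                depth = 4;

        return active_requests < depth;
}

int main(void)
{
        /* 256 shared tags, 8 active queues -> each queue may hold 32 requests. */
        printf("%d\n", may_queue(256, 8, 31));  /* 1: below its share */
        printf("%d\n", may_queue(256, 8, 32));  /* 0: at its share, must wait */
        return 0;
}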