author     Jens Axboe <axboe@kernel.dk>  2021-12-15 03:23:05 +0300
committer  Jens Axboe <axboe@kernel.dk>  2021-12-15 03:23:05 +0300
commit     68497092bde9f53e35cafeb52fa9a267ebe0d9b1 (patch)
tree       085ae66e07a24ccf023c3fdd0e784708f9341fdc /block/blk-stat.c
parent     0ba4566cd8a4e645b542e6ddbe3dd26c85ad2408 (diff)
block: make queue stat accounting a reference
kyber turns on IO statistics when it is loaded on a queue, which means that even if kyber is later unloaded, we're still stuck with stats enabled on the queue.

Change the accounting state from a bool to an int (a reference count), and pair each enable call with an equivalent disable call. This ensures that stats get turned off again appropriately.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
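For context, the intended usage after this change is that every blk_stat_enable_accounting() call is balanced by a blk_stat_disable_accounting() call, and QUEUE_FLAG_STATS is cleared only when the last user drops its reference. A minimal sketch of how a stats user (such as an I/O scheduler) might pair the two calls; the helper names below are hypothetical and not taken from the kyber source:

/*
 * Hypothetical sketch: a consumer of queue stats pairs the enable and
 * disable calls so QUEUE_FLAG_STATS stays set only while at least one
 * user still holds a reference.
 */
#include <linux/blkdev.h>
#include "blk-stat.h"

static int example_stats_user_attach(struct request_queue *q)
{
        /* First enabler sets QUEUE_FLAG_STATS; later callers just bump the count. */
        blk_stat_enable_accounting(q);
        return 0;
}

static void example_stats_user_detach(struct request_queue *q)
{
        /* Drops the reference taken in attach; the flag is cleared only at zero. */
        blk_stat_disable_accounting(q);
}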
Diffstat (limited to 'block/blk-stat.c')
-rw-r--r--  block/blk-stat.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index efb2a80db906..2ea01b5c1aca 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -15,7 +15,7 @@
 struct blk_queue_stats {
         struct list_head callbacks;
         spinlock_t lock;
-        bool enable_accounting;
+        int accounting;
 };
 
 void blk_rq_stat_init(struct blk_rq_stat *stat)
@@ -161,7 +161,7 @@ void blk_stat_remove_callback(struct request_queue *q,
 
         spin_lock_irqsave(&q->stats->lock, flags);
         list_del_rcu(&cb->list);
-        if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
+        if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
                 blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
         spin_unlock_irqrestore(&q->stats->lock, flags);
 
@@ -184,13 +184,24 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
                 call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
 }
 
+void blk_stat_disable_accounting(struct request_queue *q)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&q->stats->lock, flags);
+        if (!--q->stats->accounting)
+                blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
+        spin_unlock_irqrestore(&q->stats->lock, flags);
+}
+EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);
+
 void blk_stat_enable_accounting(struct request_queue *q)
 {
         unsigned long flags;
 
         spin_lock_irqsave(&q->stats->lock, flags);
-        q->stats->enable_accounting = true;
-        blk_queue_flag_set(QUEUE_FLAG_STATS, q);
+        if (!q->stats->accounting++)
+                blk_queue_flag_set(QUEUE_FLAG_STATS, q);
         spin_unlock_irqrestore(&q->stats->lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
@@ -205,7 +216,7 @@ struct blk_queue_stats *blk_alloc_queue_stats(void)
 
         INIT_LIST_HEAD(&stats->callbacks);
         spin_lock_init(&stats->lock);
-        stats->enable_accounting = false;
+        stats->accounting = 0;
 
         return stats;
 }