author     Mikulas Patocka <mpatocka@redhat.com>	2018-12-06 19:41:20 +0300
committer  Jens Axboe <axboe@kernel.dk>	2018-12-10 18:30:37 +0300
commit     1226b8dd0e91331cfab500f305b2c264445a0392 (patch)
tree       347679d6d2eafea136d7d93529e16cd6fbe87609 /block
parent     5b18b5a737600fd20ba2045f320d5926ebbf341a (diff)
download   linux-1226b8dd0e91331cfab500f305b2c264445a0392.tar.xz
block: switch to per-cpu in-flight counters
Now that part_round_stats is gone, we can switch to per-cpu in-flight counters. We use the local-atomic type local_t, so that if part_inc_in_flight or part_dec_in_flight is re-entered from an interrupt, the value stays correct. The other counters could be corrupted by a reentrant interrupt, but that corruption only results in slight counter skew; the in_flight counter must be exact, so it needs local_t.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
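Reduced to a standalone sketch, the pattern the patch relies on looks roughly like the following. The names demo_in_flight, demo_inc, demo_dec and demo_read are hypothetical, not the actual block-layer helpers, and callers of the inc/dec paths are assumed to run with preemption disabled (as part_stat_lock() arranges in the real code):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/local.h>

/* One local_t per CPU; local_inc()/local_dec() are atomic with respect
 * to interrupts on the same CPU, so a reentrant interrupt cannot lose
 * an update the way a plain "v++" on a per-cpu integer could. */
static DEFINE_PER_CPU(local_t, demo_in_flight);

static void demo_inc(void)		/* e.g. on request start */
{
	local_inc(this_cpu_ptr(&demo_in_flight));
}

static void demo_dec(void)		/* e.g. on request completion */
{
	local_dec(this_cpu_ptr(&demo_in_flight));
}

/* Readers sum every CPU's counter.  An increment and the matching
 * decrement may land on different CPUs, so individual per-cpu values
 * (and, transiently, the sum) can go negative; clamp to zero just as
 * part_in_flight() does after this change. */
static unsigned int demo_read(void)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(per_cpu_ptr(&demo_in_flight, cpu));

	if ((int)sum < 0)
		sum = 0;
	return sum;
}

The actual patch keeps the local_t values inside the existing per-cpu disk statistics and accesses them through part_stat_local_inc(), part_stat_local_dec() and part_stat_local_read_cpu(), as the diff below shows.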
Diffstat (limited to 'block')
-rw-r--r--  block/genhd.c  43
1 file changed, 33 insertions, 10 deletions
diff --git a/block/genhd.c b/block/genhd.c
index cdf174d7d329..9827a2c05db7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -50,9 +50,9 @@ void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
if (queue_is_mq(q))
return;
- atomic_inc(&part->in_flight[rw]);
+ part_stat_local_inc(part, in_flight[rw]);
if (part->partno)
- atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
+ part_stat_local_inc(&part_to_disk(part)->part0, in_flight[rw]);
}
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
@@ -60,38 +60,61 @@ void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
if (queue_is_mq(q))
return;
- atomic_dec(&part->in_flight[rw]);
+ part_stat_local_dec(part, in_flight[rw]);
if (part->partno)
- atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
+ part_stat_local_dec(&part_to_disk(part)->part0, in_flight[rw]);
}
void part_in_flight(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2])
{
+ int cpu;
+
if (queue_is_mq(q)) {
blk_mq_in_flight(q, part, inflight);
return;
}
- inflight[0] = atomic_read(&part->in_flight[0]) +
- atomic_read(&part->in_flight[1]);
+ inflight[0] = 0;
+ for_each_possible_cpu(cpu) {
+ inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu) +
+ part_stat_local_read_cpu(part, in_flight[1], cpu);
+ }
+ if ((int)inflight[0] < 0)
+ inflight[0] = 0;
+
if (part->partno) {
part = &part_to_disk(part)->part0;
- inflight[1] = atomic_read(&part->in_flight[0]) +
- atomic_read(&part->in_flight[1]);
+ inflight[1] = 0;
+ for_each_possible_cpu(cpu) {
+ inflight[1] += part_stat_local_read_cpu(part, in_flight[0], cpu) +
+ part_stat_local_read_cpu(part, in_flight[1], cpu);
+ }
+ if ((int)inflight[1] < 0)
+ inflight[1] = 0;
}
}
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2])
{
+ int cpu;
+
if (queue_is_mq(q)) {
blk_mq_in_flight_rw(q, part, inflight);
return;
}
- inflight[0] = atomic_read(&part->in_flight[0]);
- inflight[1] = atomic_read(&part->in_flight[1]);
+ inflight[0] = 0;
+ inflight[1] = 0;
+ for_each_possible_cpu(cpu) {
+ inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
+ inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
+ }
+ if ((int)inflight[0] < 0)
+ inflight[0] = 0;
+ if ((int)inflight[1] < 0)
+ inflight[1] = 0;
}
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)