author     Mike Snitzer <snitzer@kernel.org>  2022-04-17 20:00:15 +0300
committer  Mike Snitzer <snitzer@kernel.org>  2022-05-06 00:31:36 +0300
commit     4edadf6dcb54d2a86eeb424f27122dc0076d9267
tree       8e634b6498b61816eeb6bdeb30ef8f77c1741a39  /drivers/md/dm.c
parent     9d20653fe84ebd772c3af71808e6a727603e0b71
dm: improve abnormal bio processing
Read/write/flush are the most common operations; optimize the switch in is_abnormal_io() for those cases. This follows the same pattern established in the block perf-wip commit ("block: optimise blk_may_split for normal rw").

Also, push the is_abnormal_io() check and blk_queue_split() down from dm_submit_bio() to dm_split_and_process_bio(), and set the new 'is_abnormal_io' flag in clone_info. Optimize __split_and_process_bio() and __process_abnormal_io() by leveraging the ci.is_abnormal_io flag.

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
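For illustration, here is a minimal user-space sketch of the branch-ordering idea the patch applies in is_abnormal_io(): the common read/write/flush opcodes are rejected with a single compound test, and the switch is only evaluated for the rare abnormal ops. The enum values and helper name below are stand-ins for the kernel's REQ_OP_* definitions, not the actual dm.c code (see the diff below for that).

#include <stdbool.h>
#include <stdio.h>

/* Stand-in opcode values for illustration only; the kernel defines REQ_OP_*. */
enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_FLUSH,
	REQ_OP_DISCARD,
	REQ_OP_SECURE_ERASE,
	REQ_OP_WRITE_ZEROES,
};

/*
 * Fast-path the common cases: read/write/flush fail the compound test
 * immediately, so the switch only runs for uncommon opcodes.
 */
static bool op_is_abnormal(enum req_op op)
{
	if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_SECURE_ERASE:
		case REQ_OP_WRITE_ZEROES:
			return true;
		default:
			break;
		}
	}
	return false;
}

int main(void)
{
	printf("write abnormal?   %d\n", op_is_abnormal(REQ_OP_WRITE));   /* 0 */
	printf("discard abnormal? %d\n", op_is_abnormal(REQ_OP_DISCARD)); /* 1 */
	return 0;
}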
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c  67
1 file changed, 36 insertions(+), 31 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 50e081f68792..9650ba2075b8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -84,7 +84,8 @@ struct clone_info {
struct dm_io *io;
sector_t sector;
unsigned sector_count;
- bool submit_as_polled;
+ bool is_abnormal_io:1;
+ bool submit_as_polled:1;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
@@ -1491,21 +1492,24 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
static bool is_abnormal_io(struct bio *bio)
{
- bool r = false;
+ unsigned int op = bio_op(bio);
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- case REQ_OP_WRITE_ZEROES:
- r = true;
- break;
+ if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
+ switch (op) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ return true;
+ default:
+ break;
+ }
}
- return r;
+ return false;
}
-static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
- blk_status_t *status)
+static blk_status_t __process_abnormal_io(struct clone_info *ci,
+ struct dm_target *ti)
{
unsigned num_bios = 0;
@@ -1519,8 +1523,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
break;
- default:
- return false;
}
/*
@@ -1530,12 +1532,10 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
* check was performed.
*/
if (unlikely(!num_bios))
- *status = BLK_STS_NOTSUPP;
- else {
- __send_changing_extent_only(ci, ti, num_bios);
- *status = BLK_STS_OK;
- }
- return true;
+ return BLK_STS_NOTSUPP;
+
+ __send_changing_extent_only(ci, ti, num_bios);
+ return BLK_STS_OK;
}
/*
@@ -1588,11 +1588,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
struct bio *clone;
struct dm_target *ti;
unsigned len;
- blk_status_t error = BLK_STS_IOERR;
ti = dm_table_find_target(ci->map, ci->sector);
- if (unlikely(!ti || __process_abnormal_io(ci, ti, &error)))
- return error;
+ if (unlikely(!ti))
+ return BLK_STS_IOERR;
+ else if (unlikely(ci->is_abnormal_io))
+ return __process_abnormal_io(ci, ti);
/*
* Only support bio polling for normal IO, and the target io is
@@ -1612,11 +1613,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
}
static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
- struct dm_table *map, struct bio *bio)
+ struct dm_table *map, struct bio *bio, bool is_abnormal)
{
ci->map = map;
ci->io = alloc_io(md, bio);
ci->bio = bio;
+ ci->is_abnormal_io = is_abnormal;
ci->submit_as_polled = false;
ci->sector = bio->bi_iter.bi_sector;
ci->sector_count = bio_sectors(bio);
@@ -1636,8 +1638,18 @@ static void dm_split_and_process_bio(struct mapped_device *md,
struct clone_info ci;
struct dm_io *io;
blk_status_t error = BLK_STS_OK;
+ bool is_abnormal;
- init_clone_info(&ci, md, map, bio);
+ is_abnormal = is_abnormal_io(bio);
+ if (unlikely(is_abnormal)) {
+ /*
+ * Use blk_queue_split() for abnormal IO (e.g. discard, etc)
+ * otherwise associated queue_limits won't be imposed.
+ */
+ blk_queue_split(&bio);
+ }
+
+ init_clone_info(&ci, md, map, bio, is_abnormal);
io = ci.io;
if (bio->bi_opf & REQ_PREFLUSH) {
@@ -1697,13 +1709,6 @@ static void dm_submit_bio(struct bio *bio)
goto out;
}
- /*
- * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
- * otherwise associated queue_limits won't be imposed.
- */
- if (unlikely(is_abnormal_io(bio)))
- blk_queue_split(&bio);
-
dm_split_and_process_bio(md, map, bio);
out:
dm_put_live_table_bio(md, srcu_idx, bio);