path: root/drivers/md/dm.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2023-11-02 01:55:54 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-11-02 01:55:54 +0300
commit	0364249d2073c32c5214f02866999ce940bc35a2 (patch)
tree	8f6fa317669bdc90744481eb5a48f4401f0ca35e /drivers/md/dm.c
parent	39714efc23beb38ce850b29f4f132da6d997fc22 (diff)
parent	9793c269da6cd339757de6ba5b2c8681b54c99af (diff)
download	linux-0364249d2073c32c5214f02866999ce940bc35a2.tar.xz
Merge tag 'for-6.7/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:

 - Update DM core to directly call the map function for both the linear
   and stripe targets, which are provided by DM core

 - Various updates to use new safer string functions

 - Update DM core to respect REQ_NOWAIT flag in normal bios so that
   memory allocations are always attempted with GFP_NOWAIT

 - Add Mikulas Patocka to MAINTAINERS as a DM maintainer!

 - Improve DM delay target's handling of short delays (< 50ms) by using
   a kthread to check expiration of IOs rather than timers and a wq

 - Update the DM error target so that it works with zoned storage. This
   helps xfstests to provide proper IO error handling coverage when
   testing a filesystem with native zoned storage support

 - Update both DM crypt and integrity targets to improve performance by
   using crypto_shash_digest() rather than an init+update+final sequence

 - Fix DM crypt target by backfilling missing memory allocation
   accounting for compound pages

* tag 'for-6.7/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm crypt: account large pages in cc->n_allocated_pages
  dm integrity: use crypto_shash_digest() in sb_mac()
  dm crypt: use crypto_shash_digest() in crypt_iv_tcw_whitening()
  dm error: Add support for zoned block devices
  dm delay: for short delays, use kthread instead of timers and wq
  MAINTAINERS: add Mikulas Patocka as a DM maintainer
  dm: respect REQ_NOWAIT flag in normal bios issued to DM
  dm: enhance alloc_multiple_bios() to be more versatile
  dm: make __send_duplicate_bios return unsigned int
  dm log userspace: replace deprecated strncpy with strscpy
  dm ioctl: replace deprecated strncpy with strscpy_pad
  dm crypt: replace open-coded kmemdup_nul
  dm cache metadata: replace deprecated strncpy with strscpy
  dm: shortcut the calls to linear_map and stripe_map
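For background on the DM crypt/integrity item above (those files are not part of this diff): crypto_shash_digest() computes a whole digest in one call, letting the crypto layer pick its fastest path instead of walking the three-step state machine. A minimal sketch of the before/after pattern, with tfm setup and error handling elided (illustrative only, not code from this patch):

        /* before: init + update + final over one buffer */
        SHASH_DESC_ON_STACK(desc, tfm);
        desc->tfm = tfm;
        r = crypto_shash_init(desc);
        if (!r)
                r = crypto_shash_update(desc, data, len);
        if (!r)
                r = crypto_shash_final(desc, out);

        /* after: one-shot digest over the same buffer */
        r = crypto_shash_digest(desc, data, len, out);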
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	121
1 file changed, 73 insertions(+), 48 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f7212e8fc27f..23c32cd1f1d8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -570,13 +570,15 @@ static void dm_end_io_acct(struct dm_io *io)
dm_io_acct(io, true);
}
-static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
+static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
{
struct dm_io *io;
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
+ clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
+ if (unlikely(!clone))
+ return NULL;
tio = clone_to_tio(clone);
tio->flags = 0;
dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
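With GFP_NOIO the clone comes from the md->mempools->io_bs mempool and bio_alloc_clone() can block until it succeeds, so the old code never had to check for NULL; a GFP_NOWAIT allocation may fail, hence the new check. The call pattern this enables looks roughly like the following (mirroring the caller added at the end of this diff):

        io = alloc_io(md, bio, GFP_NOWAIT);
        if (unlikely(!io)) {
                /* would have to block: fail the bio with BLK_STS_AGAIN */
                bio_wouldblock_error(bio);
                return;
        }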
@@ -1426,9 +1428,16 @@ static void __map_bio(struct bio *clone)
if (unlikely(dm_emulate_zone_append(md)))
r = dm_zone_map_bio(tio);
else
+ goto do_map;
+ } else {
+do_map:
+ if (likely(ti->type->map == linear_map))
+ r = linear_map(ti, clone);
+ else if (ti->type->map == stripe_map)
+ r = stripe_map(ti, clone);
+ else
r = ti->type->map(ti, clone);
- } else
- r = ti->type->map(ti, clone);
+ }
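The linear and stripe targets are built into DM core (see "dm: shortcut the calls to linear_map and stripe_map" in the shortlog), so their addresses are visible here and the two most common targets can be called directly rather than through the ->map function pointer, sidestepping the indirect-branch (and retpoline) cost on the hot path. The general devirtualization idiom, sketched with hypothetical names ops->run and common_impl:

        if (likely(ops->run == common_impl))
                r = common_impl(arg);   /* direct call, no indirect branch */
        else
                r = ops->run(arg);      /* any other implementation: indirect call */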
switch (r) {
case DM_MAPIO_SUBMITTED:
@@ -1473,15 +1482,15 @@ static void setup_split_accounting(struct clone_info *ci, unsigned int len)
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
struct dm_target *ti, unsigned int num_bios,
- unsigned *len)
+ unsigned *len, gfp_t gfp_flag)
{
struct bio *bio;
- int try;
+ int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1;
- for (try = 0; try < 2; try++) {
+ for (; try < 2; try++) {
int bio_nr;
- if (try)
+ if (try && num_bios > 1)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
bio = alloc_tio(ci, ti, bio_nr, len,
try ? GFP_NOIO : GFP_NOWAIT);
@@ -1491,7 +1500,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
bio_list_add(blist, bio);
}
- if (try)
+ if (try && num_bios > 1)
mutex_unlock(&ci->io->md->table_devices_lock);
if (bio_nr == num_bios)
return;
@@ -1501,34 +1510,31 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
}
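The starting pass now depends on gfp_flag: a GFP_NOWAIT caller gets an opportunistic nonblocking pass (try == 0) before the sleeping GFP_NOIO pass, a GFP_NOIO caller skips straight to the sleeping pass, and table_devices_lock is only taken when more than one bio has to be allocated. Distilled into a sketch, where try_alloc_all() and free_partial() are hypothetical stand-ins for the inner loop above:

        for (try = (gfp_flag & GFP_NOWAIT) ? 0 : 1; try < 2; try++) {
                /* pass 0: GFP_NOWAIT, lockless; pass 1: GFP_NOIO, serialized */
                if (try_alloc_all(blist, num_bios, try ? GFP_NOIO : GFP_NOWAIT))
                        return;                 /* got all num_bios clones */
                free_partial(blist);            /* undo partial progress, retry */
        }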
-static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned int num_bios, unsigned int *len)
+static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ unsigned int num_bios, unsigned int *len,
+ gfp_t gfp_flag)
{
struct bio_list blist = BIO_EMPTY_LIST;
struct bio *clone;
unsigned int ret = 0;
- switch (num_bios) {
- case 0:
- break;
- case 1:
- if (len)
- setup_split_accounting(ci, *len);
- clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
- __map_bio(clone);
- ret = 1;
- break;
- default:
- if (len)
- setup_split_accounting(ci, *len);
- /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
- alloc_multiple_bios(&blist, ci, ti, num_bios, len);
- while ((clone = bio_list_pop(&blist))) {
+ if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
+ return 0;
+
+ /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+ if (len)
+ setup_split_accounting(ci, *len);
+
+ /*
+ * Using alloc_multiple_bios(), even if num_bios is 1, to consistently
+ * support allocating using GFP_NOWAIT with GFP_NOIO fallback.
+ */
+ alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag);
+ while ((clone = bio_list_pop(&blist))) {
+ if (num_bios > 1)
dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
- __map_bio(clone);
- ret += 1;
- }
- break;
+ __map_bio(clone);
+ ret += 1;
}
return ret;
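Callers take io_count references up front for every bio they intend to issue and then hand back the references for any bios that __send_duplicate_bios() did not create, which is why the function now reports its count as an unsigned int. The calling convention, as used by the callers later in this diff:

        atomic_add(num_bios, &ci->io->io_count);
        bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO);
        atomic_sub(num_bios - bios, &ci->io->io_count); /* refs never used */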
@@ -1555,8 +1561,12 @@ static void __send_empty_flush(struct clone_info *ci)
unsigned int bios;
struct dm_target *ti = dm_table_get_target(t, i);
+ if (unlikely(ti->num_flush_bios == 0))
+ continue;
+
atomic_add(ti->num_flush_bios, &ci->io->io_count);
- bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+ bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
+ NULL, GFP_NOWAIT);
atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
}
@@ -1569,10 +1579,9 @@ static void __send_empty_flush(struct clone_info *ci)
bio_uninit(ci->bio);
}
-static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
- unsigned int num_bios,
- unsigned int max_granularity,
- unsigned int max_sectors)
+static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
+ unsigned int num_bios, unsigned int max_granularity,
+ unsigned int max_sectors)
{
unsigned int len, bios;
@@ -1580,7 +1589,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
__max_io_len(ti, ci->sector, max_granularity, max_sectors));
atomic_add(num_bios, &ci->io->io_count);
- bios = __send_duplicate_bios(ci, ti, num_bios, &len);
+ bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO);
/*
* alloc_io() takes one extra reference for submission, so the
* reference won't reach 0 without the following (+1) subtraction
@@ -1649,8 +1658,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
if (unlikely(!num_bios))
return BLK_STS_NOTSUPP;
- __send_changing_extent_only(ci, ti, num_bios,
- max_granularity, max_sectors);
+ __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
+
return BLK_STS_OK;
}
@@ -1709,10 +1718,6 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
if (unlikely(!ti))
return BLK_STS_IOERR;
- if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
- unlikely(!dm_target_supports_nowait(ti->type)))
- return BLK_STS_NOTSUPP;
-
if (unlikely(ci->is_abnormal_io))
return __process_abnormal_io(ci, ti);
@@ -1724,7 +1729,17 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
setup_split_accounting(ci, len);
- clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
+
+ if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
+ if (unlikely(!dm_target_supports_nowait(ti->type)))
+ return BLK_STS_NOTSUPP;
+
+ clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT);
+ if (unlikely(!clone))
+ return BLK_STS_AGAIN;
+ } else {
+ clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
+ }
__map_bio(clone);
ci->sector += len;
@@ -1733,11 +1748,11 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
return BLK_STS_OK;
}
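BLK_STS_AGAIN is translated to -EAGAIN for the submitter, which is the contract that REQ_NOWAIT users such as io_uring and preadv2()/pwritev2() with RWF_NOWAIT rely on. A hypothetical userspace sketch of that contract (read_nowait() is an illustrative helper, not kernel code):

        #define _GNU_SOURCE
        #include <errno.h>
        #include <sys/types.h>
        #include <sys/uio.h>

        static ssize_t read_nowait(int fd, void *buf, size_t len, off_t off)
        {
                struct iovec iov = { .iov_base = buf, .iov_len = len };
                ssize_t n = preadv2(fd, &iov, 1, off, RWF_NOWAIT);

                if (n < 0 && errno == EAGAIN)           /* stack would block */
                        n = preadv2(fd, &iov, 1, off, 0); /* blocking retry */
                return n;
        }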
-static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
+static void init_clone_info(struct clone_info *ci, struct dm_io *io,
struct dm_table *map, struct bio *bio, bool is_abnormal)
{
ci->map = map;
- ci->io = alloc_io(md, bio);
+ ci->io = io;
ci->bio = bio;
ci->is_abnormal_io = is_abnormal;
ci->submit_as_polled = false;
@@ -1772,8 +1787,18 @@ static void dm_split_and_process_bio(struct mapped_device *md,
return;
}
- init_clone_info(&ci, md, map, bio, is_abnormal);
- io = ci.io;
+ /* Only support nowait for normal IO */
+ if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
+ io = alloc_io(md, bio, GFP_NOWAIT);
+ if (unlikely(!io)) {
+ /* Unable to do anything without dm_io. */
+ bio_wouldblock_error(bio);
+ return;
+ }
+ } else {
+ io = alloc_io(md, bio, GFP_NOIO);
+ }
+ init_clone_info(&ci, io, map, bio, is_abnormal);
if (bio->bi_opf & REQ_PREFLUSH) {
__send_empty_flush(&ci);