Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig               |   1
-rw-r--r--  drivers/md/bcache/btree.c        |   6
-rw-r--r--  drivers/md/bcache/io.c           |   3
-rw-r--r--  drivers/md/bcache/journal.c      |  16
-rw-r--r--  drivers/md/bcache/movinggc.c     |   4
-rw-r--r--  drivers/md/bcache/request.c      |  26
-rw-r--r--  drivers/md/bcache/super.c        |   9
-rw-r--r--  drivers/md/bcache/writeback.c    |  21
-rw-r--r--  drivers/md/dm-cache-policy-smq.c |   4
-rw-r--r--  drivers/md/dm-cache-target.c     |  43
-rw-r--r--  drivers/md/dm-clone-target.c     |  10
-rw-r--r--  drivers/md/dm-core.h             | 100
-rw-r--r--  drivers/md/dm-crypt.c            |  59
-rw-r--r--  drivers/md/dm-delay.c            |   5
-rw-r--r--  drivers/md/dm-ima.c              |   6
-rw-r--r--  drivers/md/dm-integrity.c        |   5
-rw-r--r--  drivers/md/dm-io.c               |  13
-rw-r--r--  drivers/md/dm-ioctl.c            |   2
-rw-r--r--  drivers/md/dm-log-writes.c       |  39
-rw-r--r--  drivers/md/dm-mpath.c            |   5
-rw-r--r--  drivers/md/dm-rq.c               |  33
-rw-r--r--  drivers/md/dm-snap.c             |  21
-rw-r--r--  drivers/md/dm-stats.c            |  34
-rw-r--r--  drivers/md/dm-stats.h            |  11
-rw-r--r--  drivers/md/dm-table.c            |  57
-rw-r--r--  drivers/md/dm-thin-metadata.c    |  28
-rw-r--r--  drivers/md/dm-thin-metadata.h    |   1
-rw-r--r--  drivers/md/dm-thin.c             |  56
-rw-r--r--  drivers/md/dm-writecache.c       |   7
-rw-r--r--  drivers/md/dm-zoned-metadata.c   |  30
-rw-r--r--  drivers/md/dm-zoned-target.c     |   4
-rw-r--r--  drivers/md/dm-zoned.h            |   9
-rw-r--r--  drivers/md/dm.c                  | 829
-rw-r--r--  drivers/md/md-faulty.c           |   4
-rw-r--r--  drivers/md/md-multipath.c        |  13
-rw-r--r--  drivers/md/md.c                  |  39
-rw-r--r--  drivers/md/raid1-10.c            |   5
-rw-r--r--  drivers/md/raid1.c               |  58
-rw-r--r--  drivers/md/raid1.h               |   1
-rw-r--r--  drivers/md/raid10.c              |  47
-rw-r--r--  drivers/md/raid10.h              |   1
-rw-r--r--  drivers/md/raid5-cache.c         |  42
-rw-r--r--  drivers/md/raid5-ppl.c           |  29
-rw-r--r--  drivers/md/raid5.c               |  29
44 files changed, 927 insertions, 838 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b5ea378e66cb..998a5cfdbc4e 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -204,6 +204,7 @@ config BLK_DEV_DM
tristate "Device mapper support"
select BLOCK_HOLDER_DEPRECATED if SYSFS
select BLK_DEV_DM_BUILTIN
+ select BLK_MQ_STACKING
depends on DAX || DAX=n
help
Device-mapper is a low level volume manager. It works by allowing
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 88c573eeb598..ad9f16689419 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2060,9 +2060,11 @@ int bch_btree_check(struct cache_set *c)
}
}
+ /*
+ * Must wait for all threads to stop.
+ */
wait_event_interruptible(check_state->wait,
- atomic_read(&check_state->started) == 0 ||
- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+ atomic_read(&check_state->started) == 0);
for (i = 0; i < check_state->total_threads; i++) {
if (check_state->infos[i].result) {
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9c6f9ec55b72..020712c5203f 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,7 +26,8 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ meta_bucket_pages(&c->cache->sb), 0);
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 61bd79babf7a..7c2ca52ca3e4 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -53,14 +53,12 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = bucket + offset;
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data);
closure_bio_submit(ca->set, bio, &cl);
@@ -611,11 +609,9 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio, bio->bi_inline_vecs, 1);
- bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
+ bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
@@ -773,16 +769,14 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_WRITE |
+ REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
+ bch_bio_map(bio, w->data);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
- bio_set_op_attrs(bio, REQ_OP_WRITE,
- REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
- bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);
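
The pattern in the io.c and journal.c hunks above recurs throughout this series: bio_init() and bio_reset() now take the target block device and the operation/flag word up front, so the separate bio_set_dev() and bio_set_op_attrs() calls disappear. A minimal before/after sketch using the journal write bio (not a complete function; identifiers are those from the hunk):

	/* before: three calls to wire up the bio */
	bio_reset(bio);
	bio_set_dev(bio, ca->bdev);
	bio_set_op_attrs(bio, REQ_OP_WRITE,
			 REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);

	/* after: device and opf are part of (re)initialization */
	bio_reset(bio, ca->bdev,
		  REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);

	/* embedded bios use the same convention; a NULL bdev and opf == 0
	 * mean "to be filled in before submission" */
	bio_init(bio, NULL, bio->bi_inline_vecs, 1, 0);
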
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index b9c3d27ec093..99499d1f6e66 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -79,8 +79,8 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio, bio->bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index d15aae6c51c1..fdd0194f84dd 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -44,10 +44,10 @@ static void bio_csum(struct bio *bio, struct bkey *k)
uint64_t csum = 0;
bio_for_each_segment(bv, bio, iter) {
- void *d = kmap(bv.bv_page) + bv.bv_offset;
+ void *d = bvec_kmap_local(&bv);
csum = crc64_be(csum, d, bv.bv_len);
- kunmap(bv.bv_page);
+ kunmap_local(d);
}
k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -685,8 +685,7 @@ static void do_bio_hook(struct search *s,
{
struct bio *bio = &s->bio.bio;
- bio_init(bio, NULL, 0);
- __bio_clone_fast(bio, orig_bio);
+ bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
/*
* bi_end_io can be set separately somewhere else, e.g. the
* variants in,
@@ -831,11 +830,11 @@ static void cached_dev_read_done(struct closure *cl)
*/
if (s->iop.bio) {
- bio_reset(s->iop.bio);
+ bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector;
- bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bio_clone_blkg_association(s->iop.bio, s->cache_miss);
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
@@ -913,14 +912,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
- cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ cache_bio = bio_alloc_bioset(miss->bi_bdev,
DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
- &dc->disk.bio_split);
+ 0, GFP_NOWAIT, &dc->disk.bio_split);
if (!cache_bio)
goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
- bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = backing_request_endio;
@@ -1025,21 +1023,21 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
*/
struct bio *flush;
- flush = bio_alloc_bioset(GFP_NOIO, 0,
- &dc->disk.bio_split);
+ flush = bio_alloc_bioset(bio->bi_bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &dc->disk.bio_split);
if (!flush) {
s->iop.status = BLK_STS_RESOURCE;
goto insert_data;
}
- bio_copy_dev(flush, bio);
flush->bi_end_io = backing_request_endio;
flush->bi_private = cl;
- flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, flush, cl);
}
} else {
- s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
+ s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &dc->disk.bio_split);
/* I/O request sent to backing device */
bio->bi_end_io = backing_request_endio;
closure_bio_submit(s->iop.c, bio, cl);
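
The request.c hunks show the allocation-side counterpart: bio_alloc_bioset() now takes the bdev, vector count, opf and gfp mask together, and bio_clone_fast()/__bio_clone_fast() are replaced by bio_alloc_clone()/bio_init_clone(). The checksum loop also switches from kmap() to the local-mapping helpers. A hedged sketch of the new calls (nr_vecs is illustrative):

	/* allocate from a bio_set, bound to a device and op at alloc time;
	 * GFP_NOWAIT allocations can still return NULL */
	cache_bio = bio_alloc_bioset(miss->bi_bdev, nr_vecs, 0,
				     GFP_NOWAIT, &dc->disk.bio_split);

	/* clone an existing bio (replaces bio_clone_fast()) */
	s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
				     &dc->disk.bio_split);

	/* per-segment checksum with local kmaps */
	bio_for_each_segment(bv, bio, iter) {
		void *d = bvec_kmap_local(&bv);

		csum = crc64_be(csum, d, bv.bv_len);
		kunmap_local(d);
	}
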
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 140f35dc0c45..bf3de149d3c9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -18,7 +18,6 @@
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
-#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
@@ -343,8 +342,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_init(bio, dc->sb_bv, 1);
- bio_set_dev(bio, dc->bdev);
+ bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
@@ -387,8 +385,7 @@ void bcache_write_super(struct cache_set *c)
if (ca->sb.version < version)
ca->sb.version = version;
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
+ bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
@@ -2240,7 +2237,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
+ bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
/*
* when ca->sb.njournal_buckets is not zero, journal exists,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index c7560f66dca8..9ee0005874cd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -292,8 +292,8 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio, bio->bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
@@ -585,10 +585,13 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
sectors_dirty = atomic_add_return(s,
d->stripe_sectors_dirty + stripe);
- if (sectors_dirty == d->stripe_size)
- set_bit(stripe, d->full_dirty_stripes);
- else
- clear_bit(stripe, d->full_dirty_stripes);
+ if (sectors_dirty == d->stripe_size) {
+ if (!test_bit(stripe, d->full_dirty_stripes))
+ set_bit(stripe, d->full_dirty_stripes);
+ } else {
+ if (test_bit(stripe, d->full_dirty_stripes))
+ clear_bit(stripe, d->full_dirty_stripes);
+ }
nr_sectors -= s;
stripe_offset = 0;
@@ -998,9 +1001,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
}
}
+ /*
+ * Must wait for all threads to stop.
+ */
wait_event_interruptible(state->wait,
- atomic_read(&state->started) == 0 ||
- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+ atomic_read(&state->started) == 0);
out:
kfree(state);
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b61aac00ff40..a3d281fc14c3 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1026,7 +1026,9 @@ static unsigned default_promote_level(struct smq_policy *mq)
* This scheme reminds me of a graph of entropy vs probability of a
* binary variable.
*/
- static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};
+ static const unsigned int table[] = {
+ 1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
+ };
unsigned hits = mq->cache_stats.hits;
unsigned misses = mq->cache_stats.misses;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 447d030036d1..780a61bc6cc0 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -744,21 +744,14 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
spin_unlock_irq(&cache->lock);
}
-static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock, bool bio_has_pbd)
-{
- if (bio_has_pbd)
- check_if_tick_bio_needed(cache, bio);
- remap_to_origin(cache, bio);
- if (bio_data_dir(bio) == WRITE)
- clear_discard(cache, oblock_to_dblock(cache, oblock));
-}
-
static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
dm_oblock_t oblock)
{
// FIXME: check_if_tick_bio_needed() is called way too much through this interface
- __remap_to_origin_clear_discard(cache, bio, oblock, true);
+ check_if_tick_bio_needed(cache, bio);
+ remap_to_origin(cache, bio);
+ if (bio_data_dir(bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
}
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
@@ -810,7 +803,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
static void accounted_request(struct cache *cache, struct bio *bio)
{
accounted_begin(cache, bio);
- submit_bio_noacct(bio);
+ dm_submit_bio_remap(bio, NULL);
}
static void issue_op(struct bio *bio, void *context)
@@ -826,16 +819,15 @@ static void issue_op(struct bio *bio, void *context)
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
+ struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
+ GFP_NOIO, &cache->bs);
BUG_ON(!origin_bio);
bio_chain(origin_bio, bio);
- /*
- * Passing false to __remap_to_origin_clear_discard() skips
- * all code that might use per_bio_data (since clone doesn't have it)
- */
- __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+
+ if (bio_data_dir(origin_bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
submit_bio(origin_bio);
remap_to_cache(cache, bio, cblock);
@@ -1716,7 +1708,7 @@ static bool process_bio(struct cache *cache, struct bio *bio)
bool commit_needed;
if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
- submit_bio_noacct(bio);
+ dm_submit_bio_remap(bio, NULL);
return commit_needed;
}
@@ -1782,7 +1774,7 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
if (cache->features.discard_passdown) {
remap_to_origin(cache, bio);
- submit_bio_noacct(bio);
+ dm_submit_bio_remap(bio, NULL);
} else
bio_endio(bio);
@@ -2023,7 +2015,6 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
{
int r;
sector_t metadata_dev_size;
- char b[BDEVNAME_SIZE];
if (!at_least_one_arg(as, error))
return -EINVAL;
@@ -2037,8 +2028,8 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
metadata_dev_size = get_dev_size(ca->metadata_dev);
if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
- DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
- bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+ DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
+ ca->metadata_dev->bdev, THIN_METADATA_MAX_SECTORS);
return 0;
}
@@ -2365,6 +2356,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->ti = ca->ti;
ti->private = cache;
+ ti->accounts_remapped_io = true;
ti->num_flush_bios = 2;
ti->flush_supported = true;
@@ -3353,7 +3345,6 @@ static void disable_passdown_if_not_supported(struct cache *cache)
struct block_device *origin_bdev = cache->origin_dev->bdev;
struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
const char *reason = NULL;
- char buf[BDEVNAME_SIZE];
if (!cache->features.discard_passdown)
return;
@@ -3365,8 +3356,8 @@ static void disable_passdown_if_not_supported(struct cache *cache)
reason = "max discard sectors smaller than a block";
if (reason) {
- DMWARN("Origin device (%s) %s: Disabling discard passdown.",
- bdevname(origin_bdev, buf), reason);
+ DMWARN("Origin device (%pg) %s: Disabling discard passdown.",
+ origin_bdev, reason);
cache->features.discard_passdown = false;
}
}
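
The bdevname() removals in this and the following targets all follow one pattern: the on-stack BDEVNAME_SIZE buffer goes away and the message uses the %pg specifier, which formats a struct block_device name directly in printk. Minimal before/after sketch (message text taken from the hunk above):

	/* before */
	char buf[BDEVNAME_SIZE];

	DMWARN("Origin device (%s) %s: Disabling discard passdown.",
	       bdevname(origin_bdev, buf), reason);

	/* after: no temporary buffer needed */
	DMWARN("Origin device (%pg) %s: Disabling discard passdown.",
	       origin_bdev, reason);
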
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 4599632d7a84..128316a73d01 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1682,7 +1682,6 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char *
{
int r;
sector_t metadata_dev_size;
- char b[BDEVNAME_SIZE];
r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
&clone->metadata_dev);
@@ -1693,8 +1692,8 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char *
metadata_dev_size = get_dev_size(clone->metadata_dev);
if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
- DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
- bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);
+ DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
+ clone->metadata_dev->bdev, DM_CLONE_METADATA_MAX_SECTORS);
return 0;
}
@@ -2033,7 +2032,6 @@ static void disable_passdown_if_not_supported(struct clone *clone)
struct block_device *dest_dev = clone->dest_dev->bdev;
struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
const char *reason = NULL;
- char buf[BDEVNAME_SIZE];
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
return;
@@ -2044,8 +2042,8 @@ static void disable_passdown_if_not_supported(struct clone *clone)
reason = "max discard sectors smaller than a region";
if (reason) {
- DMWARN("Destination device (%s) %s: Disabling discard passdown.",
- bdevname(dest_dev, buf), reason);
+ DMWARN("Destination device (%pd) %s: Disabling discard passdown.",
+ dest_dev, reason);
clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
}
}
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 219407a7b77e..4081cb6cf7b3 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -11,7 +11,6 @@
#include <linux/kthread.h>
#include <linux/ktime.h>
-#include <linux/genhd.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
@@ -65,11 +64,21 @@ struct mapped_device {
struct gendisk *disk;
struct dax_device *dax_dev;
+ wait_queue_head_t wait;
+ unsigned long __percpu *pending_io;
+
+ /* forced geometry settings */
+ struct hd_geometry geometry;
+
+ /*
+ * Processing queue (flush)
+ */
+ struct workqueue_struct *wq;
+
/*
* A list of ios that arrived while we were suspended.
*/
struct work_struct work;
- wait_queue_head_t wait;
spinlock_t deferred_lock;
struct bio_list deferred;
@@ -84,36 +93,28 @@ struct mapped_device {
struct list_head uevent_list;
spinlock_t uevent_lock; /* Protect access to uevent_list */
+ /* for blk-mq request-based DM support */
+ bool init_tio_pdu:1;
+ struct blk_mq_tag_set *tag_set;
+
+ struct dm_stats stats;
+
/* the number of internal suspends */
unsigned internal_suspend_count;
+ int swap_bios;
+ struct semaphore swap_bios_semaphore;
+ struct mutex swap_bios_lock;
+
/*
* io objects are allocated from here.
*/
struct bio_set io_bs;
struct bio_set bs;
- /*
- * Processing queue (flush)
- */
- struct workqueue_struct *wq;
-
- /* forced geometry settings */
- struct hd_geometry geometry;
-
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
- int swap_bios;
- struct semaphore swap_bios_semaphore;
- struct mutex swap_bios_lock;
-
- struct dm_stats stats;
-
- /* for blk-mq request-based DM support */
- struct blk_mq_tag_set *tag_set;
- bool init_tio_pdu:1;
-
struct srcu_struct io_barrier;
#ifdef CONFIG_BLK_DEV_ZONED
@@ -206,35 +207,76 @@ struct dm_table {
/*
* One of these is allocated per clone bio.
*/
-#define DM_TIO_MAGIC 7282014
+#define DM_TIO_MAGIC 28714
struct dm_target_io {
- unsigned int magic;
+ unsigned short magic;
+ unsigned short flags;
+ unsigned int target_bio_nr;
struct dm_io *io;
struct dm_target *ti;
- unsigned int target_bio_nr;
unsigned int *len_ptr;
- bool inside_dm_io;
+ sector_t old_sector;
struct bio clone;
};
/*
+ * dm_target_io flags
+ */
+enum {
+ DM_TIO_INSIDE_DM_IO,
+ DM_TIO_IS_DUPLICATE_BIO
+};
+
+static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
+{
+ return (tio->flags & (1U << bit)) != 0;
+}
+
+static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
+{
+ tio->flags |= (1U << bit);
+}
+
+/*
* One of these is allocated per original bio.
* It contains the first clone used for that original.
*/
-#define DM_IO_MAGIC 5191977
+#define DM_IO_MAGIC 19577
struct dm_io {
- unsigned int magic;
- struct mapped_device *md;
- blk_status_t status;
+ unsigned short magic;
+ unsigned short flags;
atomic_t io_count;
+ struct mapped_device *md;
struct bio *orig_bio;
+ blk_status_t status;
+ spinlock_t lock;
unsigned long start_time;
- spinlock_t endio_lock;
+ void *data;
+ struct hlist_node node;
+ struct task_struct *map_task;
struct dm_stats_aux stats_aux;
/* last member of dm_target_io is 'struct bio' */
struct dm_target_io tio;
};
+/*
+ * dm_io flags
+ */
+enum {
+ DM_IO_START_ACCT,
+ DM_IO_ACCOUNTED
+};
+
+static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
+{
+ return (io->flags & (1U << bit)) != 0;
+}
+
+static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
+{
+ io->flags |= (1U << bit);
+}
+
static inline void dm_io_inc_pending(struct dm_io *io)
{
atomic_inc(&io->io_count);
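
The dm-core.h rework shrinks the magic numbers to unsigned short and turns the old per-io booleans (inside_dm_io and friends) into bits in a flags word, accessed through the helpers added above. A short usage sketch, assuming tio/io objects already set up by DM core:

	static void example_flag_usage(struct dm_target_io *tio, struct dm_io *io)
	{
		/* mark and test per-clone state */
		dm_tio_set_flag(tio, DM_TIO_IS_DUPLICATE_BIO);
		if (!dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO)) {
			/* clone was allocated separately, not embedded in dm_io */
		}

		/* per-io accounting state uses the same bit helpers */
		dm_io_set_flag(io, DM_IO_START_ACCT);
		if (dm_io_flagged(io, DM_IO_ACCOUNTED))
			return;
	}
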
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4727b72073fe..fb80539865d7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -234,7 +234,7 @@ static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT 2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
-static void clone_init(struct dm_crypt_io *, struct bio *);
+static void crypt_endio(struct bio *clone);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg);
@@ -1364,11 +1364,10 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
}
if (r == -EBADMSG) {
- char b[BDEVNAME_SIZE];
sector_t s = le64_to_cpu(*sector);
- DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
- bio_devname(ctx->bio_in, b), s);
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
}
@@ -1672,11 +1671,10 @@ retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_lock(&cc->bio_alloc_lock);
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
- if (!clone)
- goto out;
-
- clone_init(io, clone);
+ clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
+ GFP_NOIO, &cc->bs);
+ clone->bi_private = io;
+ clone->bi_end_io = crypt_endio;
remaining_size = size;
@@ -1702,7 +1700,7 @@ retry:
bio_put(clone);
clone = NULL;
}
-out:
+
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_unlock(&cc->bio_alloc_lock);
@@ -1829,15 +1827,7 @@ static void crypt_endio(struct bio *clone)
crypt_dec_pending(io);
}
-static void clone_init(struct dm_crypt_io *io, struct bio *clone)
-{
- struct crypt_config *cc = io->cc;
-
- clone->bi_private = io;
- clone->bi_end_io = crypt_endio;
- bio_set_dev(clone, cc->dev->bdev);
- clone->bi_opf = io->base_bio->bi_opf;
-}
+#define CRYPT_MAP_READ_GFP GFP_NOWAIT
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
@@ -1845,18 +1835,19 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
struct bio *clone;
/*
- * We need the original biovec array in order to decrypt
- * the whole bio data *afterwards* -- thanks to immutable
- * biovecs we don't need to worry about the block layer
- * modifying the biovec array; so leverage bio_clone_fast().
+ * We need the original biovec array in order to decrypt the whole bio
+ * data *afterwards* -- thanks to immutable biovecs we don't need to
+ * worry about the block layer modifying the biovec array; so leverage
+ * bio_alloc_clone().
*/
- clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
+ clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
if (!clone)
return 1;
+ clone->bi_private = io;
+ clone->bi_end_io = crypt_endio;
crypt_inc_pending(io);
- clone_init(io, clone);
clone->bi_iter.bi_sector = cc->start + io->sector;
if (dm_crypt_integrity_io_alloc(io, clone)) {
@@ -1865,7 +1856,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
return 1;
}
- submit_bio_noacct(clone);
+ dm_submit_bio_remap(io->base_bio, clone);
return 0;
}
@@ -1891,7 +1882,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
{
struct bio *clone = io->ctx.bio_out;
- submit_bio_noacct(clone);
+ dm_submit_bio_remap(io->base_bio, clone);
}
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
@@ -1970,7 +1961,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
- submit_bio_noacct(clone);
+ dm_submit_bio_remap(io->base_bio, clone);
return;
}
@@ -2178,11 +2169,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
if (error == -EBADMSG) {
- char b[BDEVNAME_SIZE];
sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
- DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
- bio_devname(ctx->bio_in, b), s);
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
io->error = BLK_STS_PROTECTION;
@@ -2589,7 +2579,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
static int get_key_size(char **key_string)
{
- return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
+ return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
}
#endif /* CONFIG_KEYS */
@@ -3372,6 +3362,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1;
ti->limit_swap_bios = true;
+ ti->accounts_remapped_io = true;
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
return 0;
@@ -3440,7 +3431,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
io->ctx.r.req = (struct skcipher_request *)(io + 1);
if (bio_data_dir(io->base_bio) == READ) {
- if (kcryptd_io_read(io, GFP_NOWAIT))
+ if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
kcryptd_queue_read(io);
} else
kcryptd_queue_crypt(io);
@@ -3635,7 +3626,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 23, 0},
+ .version = {1, 24, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 59e51d285b0e..9a51bf51a859 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- submit_bio_noacct(bio);
+ dm_submit_bio_remap(bio, NULL);
bio = n;
}
}
@@ -232,6 +232,7 @@ out:
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
+ ti->accounts_remapped_io = true;
ti->per_io_data_size = sizeof(struct dm_delay_info);
return 0;
@@ -355,7 +356,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 2, 1},
+ .version = {1, 3, 0},
.features = DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
.ctr = delay_ctr,
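
dm-cache, dm-crypt, dm-delay (and dm-thin later in this diff) are converted with the same two-part pattern: the constructor sets ti->accounts_remapped_io, and submit_bio_noacct() calls become dm_submit_bio_remap(), so DM core can defer I/O accounting until the target actually issues the (possibly queued) bio. A sketch of the pairing, not tied to any one target:

	/* in the target's ->ctr() */
	ti->accounts_remapped_io = true;

	/*
	 * When the deferred bio is finally issued:
	 * arg 1 is the bio DM core passed to ->map() (it carries the
	 * dm_target_io); arg 2 is an optional target-allocated clone,
	 * or NULL to submit arg 1 itself.
	 */
	dm_submit_bio_remap(bio, NULL);			/* dm-cache, dm-delay, dm-thin */
	dm_submit_bio_remap(io->base_bio, clone);	/* dm-crypt */
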
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 957999998d70..1842d3a958ef 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -455,7 +455,7 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
"%sname=%s,uuid=%s;device_resume=no_data;",
DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l += strlen(device_table_data);
+ l = strlen(device_table_data);
}
@@ -568,7 +568,7 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
"%sname=%s,uuid=%s;device_remove=no_data;",
DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l += strlen(device_table_data);
+ l = strlen(device_table_data);
}
memcpy(device_table_data + l, remove_all_str, remove_all_len);
@@ -654,7 +654,7 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
"%sname=%s,uuid=%s;table_clear=no_data;",
DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l += strlen(device_table_data);
+ l = strlen(device_table_data);
}
capacity_len = strlen(capacity_str);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index eb4b5e52bd6f..c58a5111cb57 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1788,12 +1788,11 @@ again:
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- char b[BDEVNAME_SIZE];
sector_t s;
s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
- DMERR_LIMIT("%s: Checksum failed at sector 0x%llx",
- bio_devname(bio, b), s);
+ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ bio->bi_bdev, s);
r = -EILSEQ;
atomic64_inc(&ic->number_of_mismatches);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 82d431677bb6..5762366333a2 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -339,11 +339,10 @@ static void do_region(int op, int op_flags, unsigned region,
(PAGE_SIZE >> SECTOR_SHIFT)));
}
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
+ bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
+ GFP_NOIO, &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
- bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
- bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
@@ -508,14 +507,6 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
return 0;
}
-/*
- * New collapsed (a)synchronous interface.
- *
- * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in
- * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
- * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
- */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
{
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 21fe8652b095..901abd6dea41 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -18,6 +18,7 @@
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/ima.h>
@@ -1788,6 +1789,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
return NULL;
+ cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
*ioctl_flags = _ioctls[cmd].flags;
return _ioctls[cmd].fn;
}
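
The dm-ioctl change is the standard Spectre-v1 hardening idiom: after the bounds check, the index is clamped with array_index_nospec() so it cannot be used speculatively out of range before the branch resolves. A generic sketch of the idiom (table and handler names are illustrative):

	#include <linux/nospec.h>

	if (idx >= ARRAY_SIZE(table))
		return -EINVAL;
	/* clamp the already-validated index for speculative execution */
	idx = array_index_nospec(idx, ARRAY_SIZE(table));
	return table[idx].handler(arg);
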
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 139b09b06eda..c9d036d6bb2e 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -217,18 +217,12 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
void *ptr;
size_t ret;
- bio = bio_alloc(GFP_KERNEL, 1);
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
log_end_super : log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -275,18 +269,12 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_pages);
- if (!bio) {
- DMERR("Couldn't alloc inline data bio");
- goto error;
- }
-
+ bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
+ GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < bio_pages; i++) {
pg_datalen = min_t(int, datalen, PAGE_SIZE);
@@ -322,7 +310,6 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
error_bio:
bio_free_pages(bio);
bio_put(bio);
-error:
put_io_block(lc);
return -1;
}
@@ -363,17 +350,12 @@ static int log_one_block(struct log_writes_c *lc,
goto out;
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
+ REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < block->vec_cnt; i++) {
/*
@@ -385,18 +367,13 @@ static int log_one_block(struct log_writes_c *lc,
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL,
- bio_max_segs(block->vec_cnt - i));
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
+ bio = bio_alloc(lc->logdev->bdev,
+ bio_max_segs(block->vec_cnt - i),
+ REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 9ab971834a53..6ed9d2731254 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -899,10 +899,7 @@ retain:
if (m->hw_handler_name) {
r = scsi_dh_attach(q, m->hw_handler_name);
if (r == -EBUSY) {
- char b[BDEVNAME_SIZE];
-
- printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
- bdevname(bdev, b));
+ DMINFO("retaining handler on device %pg", bdev);
goto retain;
}
if (r < 0) {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 3907950a0ddc..6087cdcaad46 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -300,21 +300,6 @@ static void end_clone_request(struct request *clone, blk_status_t error)
dm_complete_request(tio->orig, error);
}
-static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
-{
- blk_status_t r;
-
- if (blk_queue_io_stat(clone->q))
- clone->rq_flags |= RQF_IO_STAT;
-
- clone->start_time_ns = ktime_get_ns();
- r = blk_insert_cloned_request(clone->q, clone);
- if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
- /* must complete clone in terms of original request */
- dm_complete_request(rq, r);
- return r;
-}
-
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
@@ -395,13 +380,20 @@ static int map_request(struct dm_rq_target_io *tio)
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- ret = dm_dispatch_clone_request(clone, rq);
- if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
+ ret = blk_insert_cloned_request(clone);
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_RESOURCE:
+ case BLK_STS_DEV_RESOURCE:
blk_rq_unprep_clone(clone);
blk_mq_cleanup_rq(clone);
tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
+ default:
+ /* must complete clone in terms of original request */
+ dm_complete_request(rq, ret);
}
break;
case DM_MAPIO_REQUEUE:
@@ -496,8 +488,13 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(!ti)) {
int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+ struct dm_table *map;
+ map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ dm_put_live_table(md, srcu_idx);
+ return BLK_STS_RESOURCE;
+ }
ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index dcf34c6b05ad..0d336b5ec571 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -141,11 +141,6 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
-
- /*
- * Flush data after merge.
- */
- struct bio flush_bio;
};
/*
@@ -1127,17 +1122,6 @@ shut:
static void error_bios(struct bio *bio);
-static int flush_data(struct dm_snapshot *s)
-{
- struct bio *flush_bio = &s->flush_bio;
-
- bio_reset(flush_bio);
- bio_set_dev(flush_bio, s->origin->bdev);
- flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- return submit_bio_wait(flush_bio);
-}
-
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1151,7 +1135,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
- if (flush_data(s) < 0) {
+ if (blkdev_issue_flush(s->origin->bdev) < 0) {
DMERR("Flush after merge failed: shutting down merge");
goto shut;
}
@@ -1340,7 +1324,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
- bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1528,8 +1511,6 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
- bio_uninit(&s->flush_bio);
-
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
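
dm-snap (and dm-thin below) drop their long-lived embedded flush bios: instead of keeping a struct bio in the snapshot/pool and resetting it for each flush, a synchronous empty flush is issued directly. Minimal sketch with the snapshot's origin device:

	/* replaces bio_reset() + REQ_OP_WRITE | REQ_PREFLUSH + submit_bio_wait() */
	if (blkdev_issue_flush(s->origin->bdev) < 0) {
		DMERR("Flush after merge failed: shutting down merge");
		goto shut;
	}
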
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 35d368c418d0..0e039a8c0bf2 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -195,6 +195,7 @@ void dm_stats_init(struct dm_stats *stats)
mutex_init(&stats->mutex);
INIT_LIST_HEAD(&stats->list);
+ stats->precise_timestamps = false;
stats->last = alloc_percpu(struct dm_stats_last_position);
for_each_possible_cpu(cpu) {
last = per_cpu_ptr(stats->last, cpu);
@@ -231,6 +232,22 @@ void dm_stats_cleanup(struct dm_stats *stats)
mutex_destroy(&stats->mutex);
}
+static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
+{
+ struct list_head *l;
+ struct dm_stat *tmp_s;
+ bool precise_timestamps = false;
+
+ list_for_each(l, &stats->list) {
+ tmp_s = container_of(l, struct dm_stat, list_entry);
+ if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) {
+ precise_timestamps = true;
+ break;
+ }
+ }
+ stats->precise_timestamps = precise_timestamps;
+}
+
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
sector_t step, unsigned stat_flags,
unsigned n_histogram_entries,
@@ -376,6 +393,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
}
ret_id = s->id;
list_add_tail_rcu(&s->list_entry, l);
+
+ dm_stats_recalc_precise_timestamps(stats);
+
mutex_unlock(&stats->mutex);
resume_callback(md);
@@ -418,6 +438,9 @@ static int dm_stats_delete(struct dm_stats *stats, int id)
}
list_del_rcu(&s->list_entry);
+
+ dm_stats_recalc_precise_timestamps(stats);
+
mutex_unlock(&stats->mutex);
/*
@@ -621,13 +644,14 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
sector_t bi_sector, unsigned bi_sectors, bool end,
- unsigned long duration_jiffies,
+ unsigned long start_time,
struct dm_stats_aux *stats_aux)
{
struct dm_stat *s;
sector_t end_sector;
struct dm_stats_last_position *last;
bool got_precise_time;
+ unsigned long duration_jiffies = 0;
if (unlikely(!bi_sectors))
return;
@@ -647,16 +671,16 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
));
WRITE_ONCE(last->last_sector, end_sector);
WRITE_ONCE(last->last_rw, bi_rw);
- }
+ } else
+ duration_jiffies = jiffies - start_time;
rcu_read_lock();
got_precise_time = false;
list_for_each_entry_rcu(s, &stats->list, list_entry) {
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
- if (!end)
- stats_aux->duration_ns = ktime_to_ns(ktime_get());
- else
+ /* start (!end) duration_ns is set by DM core's alloc_io() */
+ if (end)
stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
got_precise_time = true;
}
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index 2ddfae678f32..09c81a1ec057 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -13,8 +13,7 @@ struct dm_stats {
struct mutex mutex;
struct list_head list; /* list of struct dm_stat */
struct dm_stats_last_position __percpu *last;
- sector_t last_sector;
- unsigned last_rw;
+ bool precise_timestamps;
};
struct dm_stats_aux {
@@ -32,7 +31,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
sector_t bi_sector, unsigned bi_sectors, bool end,
- unsigned long duration_jiffies,
+ unsigned long start_time,
struct dm_stats_aux *aux);
static inline bool dm_stats_used(struct dm_stats *st)
@@ -40,4 +39,10 @@ static inline bool dm_stats_used(struct dm_stats *st)
return !list_empty(&st->list);
}
+static inline void dm_stats_record_start(struct dm_stats *stats, struct dm_stats_aux *aux)
+{
+ if (unlikely(stats->precise_timestamps))
+ aux->duration_ns = ktime_to_ns(ktime_get());
+}
+
#endif
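
With precise_timestamps cached in struct dm_stats, the start-side ktime_get() moves out of dm_stats_account_io() and into DM core via the new dm_stats_record_start() helper, and dm_stats_account_io() now takes the jiffies start time and derives the duration itself at completion. A hedged sketch of the intended call sites in DM core (the surrounding code is illustrative, not the exact dm.c implementation):

	/* when a dm_io is set up for an incoming bio */
	io->start_time = jiffies;
	dm_stats_record_start(&md->stats, &io->stats_aux);

	/* at completion: pass the start time, not a precomputed duration */
	dm_stats_account_io(&md->stats, bio_data_dir(bio),
			    bio->bi_iter.bi_sector, bio_sectors(bio),
			    true, io->start_time, &io->stats_aux);
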
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 88ab98637934..03541cfc2317 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -230,15 +230,14 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t dev_size = bdev_nr_sectors(bdev);
unsigned short logical_block_size_sectors =
limits->logical_block_size >> SECTOR_SHIFT;
- char b[BDEVNAME_SIZE];
if (!dev_size)
return 0;
if ((start >= dev_size) || (start + len > dev_size)) {
- DMWARN("%s: %s too small for target: "
+ DMWARN("%s: %pg too small for target: "
"start=%llu, len=%llu, dev_size=%llu",
- dm_device_name(ti->table->md), bdevname(bdev, b),
+ dm_device_name(ti->table->md), bdev,
(unsigned long long)start,
(unsigned long long)len,
(unsigned long long)dev_size);
@@ -253,10 +252,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
unsigned int zone_sectors = bdev_zone_sectors(bdev);
if (start & (zone_sectors - 1)) {
- DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
+ DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
- zone_sectors, bdevname(bdev, b));
+ zone_sectors, bdev);
return 1;
}
@@ -270,10 +269,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* the sector range.
*/
if (len & (zone_sectors - 1)) {
- DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
+ DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
- zone_sectors, bdevname(bdev, b));
+ zone_sectors, bdev);
return 1;
}
}
@@ -283,19 +282,19 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
if (start & (logical_block_size_sectors - 1)) {
DMWARN("%s: start=%llu not aligned to h/w "
- "logical block size %u of %s",
+ "logical block size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
- limits->logical_block_size, bdevname(bdev, b));
+ limits->logical_block_size, bdev);
return 1;
}
if (len & (logical_block_size_sectors - 1)) {
DMWARN("%s: len=%llu not aligned to h/w "
- "logical block size %u of %s",
+ "logical block size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
- limits->logical_block_size, bdevname(bdev, b));
+ limits->logical_block_size, bdev);
return 1;
}
@@ -400,20 +399,19 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
- char b[BDEVNAME_SIZE];
if (unlikely(!q)) {
- DMWARN("%s: Cannot set limits for nonexistent device %s",
- dm_device_name(ti->table->md), bdevname(bdev, b));
+ DMWARN("%s: Cannot set limits for nonexistent device %pg",
+ dm_device_name(ti->table->md), bdev);
return 0;
}
if (blk_stack_limits(limits, &q->limits,
get_start_sect(bdev) + start) < 0)
- DMWARN("%s: adding target device %s caused an alignment inconsistency: "
+ DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
"physical_block_size=%u, logical_block_size=%u, "
"alignment_offset=%u, start=%llu",
- dm_device_name(ti->table->md), bdevname(bdev, b),
+ dm_device_name(ti->table->md), bdev,
q->limits.physical_block_size,
q->limits.logical_block_size,
q->limits.alignment_offset,
@@ -1483,6 +1481,14 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
+static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
+}
+
/*
* type->iterate_devices() should be called when the sanity check needs to
* iterate and check all underlying data devices. iterate_devices() will
@@ -1533,6 +1539,11 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
+static int dm_table_supports_poll(struct dm_table *t)
+{
+ return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
+}
+
/*
* Check whether a table has no data devices attached using each
* target's iterate_devices method.
@@ -2040,6 +2051,20 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_update_crypto_profile(q, t);
disk_update_readahead(t->md->disk);
+ /*
+ * Check for request-based device is left to
+ * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
+ *
+ * For bio-based device, only set QUEUE_FLAG_POLL when all
+ * underlying devices supporting polling.
+ */
+ if (__table_type_bio_based(t->type)) {
+ if (dm_table_supports_poll(t))
+ blk_queue_flag_set(QUEUE_FLAG_POLL, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+ }
+
return 0;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 1a96a07cbf44..2db7030aba00 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1665,22 +1665,6 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
return r;
}
-static int __remove(struct dm_thin_device *td, dm_block_t block)
-{
- int r;
- struct dm_pool_metadata *pmd = td->pmd;
- dm_block_t keys[2] = { td->id, block };
-
- r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
- if (r)
- return r;
-
- td->mapped_blocks--;
- td->changed = true;
-
- return 0;
-}
-
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
int r;
@@ -1740,18 +1724,6 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
}
-int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
-{
- int r = -EINVAL;
-
- pmd_write_lock(td->pmd);
- if (!td->pmd->fail_io)
- r = __remove(td, block);
- pmd_write_unlock(td->pmd);
-
- return r;
-}
-
int dm_thin_remove_range(struct dm_thin_device *td,
dm_block_t begin, dm_block_t end)
{
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 7ef56bd2a7e3..4d7a2caf21d9 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -166,7 +166,6 @@ int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result);
int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t data_block);
-int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
int dm_thin_remove_range(struct dm_thin_device *td,
dm_block_t begin, dm_block_t end);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ec119d2422d5..4d25d0e27031 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -161,7 +161,7 @@ static void throttle_work_start(struct throttle *t)
static void throttle_work_update(struct throttle *t)
{
- if (!t->throttle_applied && jiffies > t->threshold) {
+ if (!t->throttle_applied && time_is_before_jiffies(t->threshold)) {
down_write(&t->lock);
t->throttle_applied = true;
}
@@ -282,8 +282,6 @@ struct pool {
struct dm_bio_prison_cell **cell_sort_array;
mempool_t mapping_pool;
-
- struct bio flush_bio;
};
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -757,7 +755,7 @@ static void issue(struct thin_c *tc, struct bio *bio)
struct pool *pool = tc->pool;
if (!bio_triggers_commit(tc, bio)) {
- submit_bio_noacct(bio);
+ dm_submit_bio_remap(bio, NULL);
return;
}
@@ -1179,25 +1177,17 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
return;
}
- discard_parent = bio_alloc(GFP_NOIO, 1);
- if (!discard_parent) {
- DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
- dm_device_name(tc->pool->pool_md));
- queue_passdown_pt2(m);
-
- } else {
- discard_parent->bi_end_io = passdown_endio;
- discard_parent->bi_private = m;
-
- if (m->maybe_shared)
- passdown_double_checking_shared_status(m, discard_parent);
- else {
- struct discard_op op;
-
- begin_discard(&op, tc, discard_parent);
- r = issue_discard(&op, m->data_block, data_end);
- end_discard(&op, r);
- }
+ discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
+ discard_parent->bi_end_io = passdown_endio;
+ discard_parent->bi_private = m;
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
+ struct discard_op op;
+
+ begin_discard(&op, tc, discard_parent);
+ r = issue_discard(&op, m->data_block, data_end);
+ end_discard(&op, r);
}
}
@@ -2393,7 +2383,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio->bi_opf & REQ_PREFLUSH)
bio_endio(bio);
else
- submit_bio_noacct(bio);
+ dm_submit_bio_remap(bio, NULL);
}
}
@@ -2834,7 +2824,6 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
struct block_device *data_bdev = pt->data_dev->bdev;
struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
const char *reason = NULL;
- char buf[BDEVNAME_SIZE];
if (!pt->adjusted_pf.discard_passdown)
return;
@@ -2846,7 +2835,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
reason = "max discard sectors smaller than a block";
if (reason) {
- DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
+ DMWARN("Data device (%pg) %s: Disabling discard passdown.", data_bdev, reason);
pt->adjusted_pf.discard_passdown = false;
}
}
@@ -2913,7 +2902,6 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, &pool->mapping_pool);
mempool_exit(&pool->mapping_pool);
- bio_uninit(&pool->flush_bio);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
@@ -2994,7 +2982,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->low_water_triggered = false;
pool->suspended = true;
pool->out_of_data_space = false;
- bio_init(&pool->flush_bio, NULL, 0);
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -3201,13 +3188,8 @@ static void metadata_low_callback(void *context)
static int metadata_pre_commit_callback(void *context)
{
struct pool *pool = context;
- struct bio *flush_bio = &pool->flush_bio;
-
- bio_reset(flush_bio);
- bio_set_dev(flush_bio, pool->data_dev);
- flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- return submit_bio_wait(flush_bio);
+ return blkdev_issue_flush(pool->data_dev);
}
static sector_t get_dev_size(struct block_device *bdev)
@@ -3218,11 +3200,10 @@ static sector_t get_dev_size(struct block_device *bdev)
static void warn_if_metadata_device_too_big(struct block_device *bdev)
{
sector_t metadata_dev_size = get_dev_size(bdev);
- char buffer[BDEVNAME_SIZE];
if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
- DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
- bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+ DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
+ bdev, THIN_METADATA_MAX_SECTORS);
}
static sector_t get_metadata_dev_size(struct block_device *bdev)
@@ -4250,6 +4231,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_flush_bios = 1;
ti->flush_supported = true;
+ ti->accounts_remapped_io = true;
ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
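
The throttle fix in dm-thin swaps a raw jiffies comparison for the wraparound-safe helper: time_is_before_jiffies(t) is true once the stored timestamp t lies in the past, even across a jiffies wrap, whereas "jiffies > t" misbehaves near the wrap point. Minimal sketch (the delay value is illustrative):

	unsigned long threshold = jiffies + msecs_to_jiffies(100);

	/* later, e.g. from work-queue context */
	if (time_is_before_jiffies(threshold)) {
		/* the threshold has expired: apply throttling */
	}
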
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4f31591d2d25..5630b470ba42 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1821,11 +1821,11 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
max_pages = e->wc_list_contiguous;
- bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
+ bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE,
+ GFP_NOIO, &wc->bio_set);
wb = container_of(bio, struct writeback_struct, bio);
wb->wc = wc;
bio->bi_end_io = writecache_writeback_endio;
- bio_set_dev(bio, wc->dev->bdev);
bio->bi_iter.bi_sector = read_original_sector(wc, e);
if (max_pages <= WB_LIST_INLINE ||
unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
@@ -1852,7 +1852,8 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc_list[wb->wc_list_n++] = f;
e = f;
}
- bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
+ if (WC_MODE_FUA(wc))
+ bio->bi_opf |= REQ_FUA;
if (writecache_has_error(wc)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index ee4626d08557..d1ea66114d14 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -550,11 +550,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
if (!mblk)
return ERR_PTR(-ENOMEM);
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- dmz_free_mblock(zmd, mblk);
- return ERR_PTR(-ENOMEM);
- }
+ bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
+ GFP_NOIO);
spin_lock(&zmd->mblk_lock);
@@ -578,10 +575,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
@@ -725,19 +720,14 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- set_bit(DMZ_META_ERROR, &mblk->state);
- return -ENOMEM;
- }
+ bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
+ GFP_NOIO);
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
@@ -759,13 +749,9 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op,
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio)
- return -ENOMEM;
-
+ bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
+ GFP_NOIO);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
- bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
@@ -1115,8 +1101,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
*/
static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
- dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
- set, sb->dev->name, sb->block);
+ dmz_zmd_debug(zmd, "read superblock set %d dev %pg block %llu",
+ set, sb->dev->bdev, sb->block);
return dmz_rdwr_block(sb->dev, REQ_OP_READ,
sb->block, sb->mblk->page);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 166c4e9d99c9..cac295cc8840 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -125,11 +125,10 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
if (dev->flags & DMZ_BDEV_DYING)
return -EIO;
- clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
+ clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
- bio_set_dev(clone, dev->bdev);
bioctx->dev = dev;
clone->bi_iter.bi_sector =
dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
@@ -731,7 +730,6 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
}
dev->bdev = bdev;
dev->dev_idx = idx;
- (void)bdevname(dev->bdev, dev->name);
dev->capacity = bdev_nr_sectors(bdev);
if (ti->begin) {
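
dmz_submit_bio() above shows the bio_clone_fast() + bio_set_dev() pair collapsing into a single bio_alloc_clone() call. The same conversion reduced to a standalone sketch; example_remap_clone, example_clone_endio and new_sector are hypothetical, and the completion wiring is only one plausible way to finish the original bio:

#include <linux/bio.h>

static void example_clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static int example_remap_clone(struct block_device *bdev, struct bio *orig,
			       struct bio_set *bs, sector_t new_sector)
{
	/* clone the bio and bind it to @bdev in one call */
	struct bio *clone = bio_alloc_clone(bdev, orig, GFP_NOIO, bs);

	if (!clone)
		return -ENOMEM;
	clone->bi_iter.bi_sector = new_sector;
	clone->bi_private = orig;
	clone->bi_end_io = example_clone_endio;
	submit_bio(clone);
	return 0;
}
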
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index 22f11440b423..a02744a0846c 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -56,7 +56,6 @@ struct dmz_dev {
struct dmz_metadata *metadata;
struct dmz_reclaim *reclaim;
- char name[BDEVNAME_SIZE];
uuid_t uuid;
sector_t capacity;
@@ -176,16 +175,16 @@ enum {
* Message functions.
*/
#define dmz_dev_info(dev, format, args...) \
- DMINFO("(%s): " format, (dev)->name, ## args)
+ DMINFO("(%pg): " format, (dev)->bdev, ## args)
#define dmz_dev_err(dev, format, args...) \
- DMERR("(%s): " format, (dev)->name, ## args)
+ DMERR("(%pg): " format, (dev)->bdev, ## args)
#define dmz_dev_warn(dev, format, args...) \
- DMWARN("(%s): " format, (dev)->name, ## args)
+ DMWARN("(%pg): " format, (dev)->bdev, ## args)
#define dmz_dev_debug(dev, format, args...) \
- DMDEBUG("(%s): " format, (dev)->name, ## args)
+ DMDEBUG("(%pg): " format, (dev)->bdev, ## args)
/*
* Functions defined in dm-zoned-metadata.c
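
The message-macro hunk above leans on the %pg printk extension, which prints a block_device's name directly and is what allows the patch to drop the cached dev->name / bdevname() copy. A trivial sketch (example_report is an invented helper):

#include <linux/blkdev.h>

static void example_report(struct block_device *bdev, sector_t sector)
{
	/* %pg resolves the device name from the block_device itself */
	pr_info("(%pg): resubmitting sector %llu\n",
		bdev, (unsigned long long)sector);
}
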
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 79e273924afd..ad2e0bbeb559 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -40,6 +40,13 @@
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
+/*
+ * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
+ * dm_io into one list, and reuse bio->bi_private as the list head. Before
+ * ending this fs bio, we will recover its ->bi_private.
+ */
+#define REQ_DM_POLL_LIST REQ_DRV
+
static const char *_name = DM_NAME;
static unsigned int major = 0;
@@ -73,16 +80,21 @@ struct clone_info {
struct dm_io *io;
sector_t sector;
unsigned sector_count;
+ bool submit_as_polled;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
+static inline struct dm_target_io *clone_to_tio(struct bio *clone)
+{
+ return container_of(clone, struct dm_target_io, clone);
+}
+
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- if (!tio->inside_dm_io)
+ if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
@@ -477,40 +489,78 @@ out:
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct dm_io *io = tio->io;
-
- return jiffies_to_nsecs(io->start_time);
+ return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
-static void start_io_acct(struct dm_io *io)
+static bool bio_is_flush_with_data(struct bio *bio)
{
- struct mapped_device *md = io->md;
- struct bio *bio = io->orig_bio;
+ return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
+}
+
+static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
+ unsigned long start_time, struct dm_stats_aux *stats_aux)
+{
+ bool is_flush_with_data;
+ unsigned int bi_size;
+
+ /* If REQ_PREFLUSH set save any payload but do not account it */
+ is_flush_with_data = bio_is_flush_with_data(bio);
+ if (is_flush_with_data) {
+ bi_size = bio->bi_iter.bi_size;
+ bio->bi_iter.bi_size = 0;
+ }
+
+ if (!end)
+ bio_start_io_acct_time(bio, start_time);
+ else
+ bio_end_io_acct(bio, start_time);
- io->start_time = bio_start_io_acct(bio);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
- false, 0, &io->stats_aux);
+ end, start_time, stats_aux);
+
+ /* Restore bio's payload so it does get accounted upon requeue */
+ if (is_flush_with_data)
+ bio->bi_iter.bi_size = bi_size;
}
-static void end_io_acct(struct mapped_device *md, struct bio *bio,
- unsigned long start_time, struct dm_stats_aux *stats_aux)
+static void __dm_start_io_acct(struct dm_io *io, struct bio *bio)
{
- unsigned long duration = jiffies - start_time;
+ dm_io_acct(false, io->md, bio, io->start_time, &io->stats_aux);
+}
- bio_end_io_acct(bio, start_time);
+static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
+{
+ /* Must account IO to DM device in terms of orig_bio */
+ struct bio *bio = io->orig_bio;
- if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio_data_dir(bio),
- bio->bi_iter.bi_sector, bio_sectors(bio),
- true, duration, stats_aux);
+ /*
+ * Ensure IO accounting is only ever started once.
+ * Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO.
+ */
+ if (!clone ||
+ likely(!dm_tio_flagged(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO))) {
+ if (WARN_ON_ONCE(dm_io_flagged(io, DM_IO_ACCOUNTED)))
+ return;
+ dm_io_set_flag(io, DM_IO_ACCOUNTED);
+ } else {
+ unsigned long flags;
+ if (dm_io_flagged(io, DM_IO_ACCOUNTED))
+ return;
+ /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
+ spin_lock_irqsave(&io->lock, flags);
+ dm_io_set_flag(io, DM_IO_ACCOUNTED);
+ spin_unlock_irqrestore(&io->lock, flags);
+ }
- /* nudge anyone waiting on suspend queue */
- if (unlikely(wq_has_sleeper(&md->wait)))
- wake_up(&md->wait);
+ __dm_start_io_acct(io, bio);
+}
+
+static void dm_end_io_acct(struct dm_io *io, struct bio *bio)
+{
+ dm_io_acct(true, io->md, bio, io->start_time, &io->stats_aux);
}
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
@@ -519,62 +569,80 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
- if (!clone)
- return NULL;
+ clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);
- tio = container_of(clone, struct dm_target_io, clone);
- tio->inside_dm_io = true;
+ tio = clone_to_tio(clone);
+ tio->flags = 0;
+ dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
tio->io = NULL;
io = container_of(tio, struct dm_io, tio);
io->magic = DM_IO_MAGIC;
io->status = 0;
atomic_set(&io->io_count, 1);
- io->orig_bio = bio;
+ this_cpu_inc(*md->pending_io);
+ io->orig_bio = NULL;
io->md = md;
- spin_lock_init(&io->endio_lock);
+ io->map_task = current;
+ spin_lock_init(&io->lock);
+ io->start_time = jiffies;
+ io->flags = 0;
- start_io_acct(io);
+ dm_stats_record_start(&md->stats, &io->stats_aux);
return io;
}
-static void free_io(struct mapped_device *md, struct dm_io *io)
+static void free_io(struct dm_io *io)
{
bio_put(&io->tio.clone);
}
-static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
- unsigned target_bio_nr, gfp_t gfp_mask)
+static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+ unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
struct dm_target_io *tio;
+ struct bio *clone;
if (!ci->io->tio.io) {
/* the dm_target_io embedded in ci->io is available */
tio = &ci->io->tio;
+ /* alloc_io() already initialized embedded clone */
+ clone = &tio->clone;
} else {
- struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
+ clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
+ gfp_mask, &ci->io->md->bs);
if (!clone)
return NULL;
- tio = container_of(clone, struct dm_target_io, clone);
- tio->inside_dm_io = false;
+ /* REQ_DM_POLL_LIST shouldn't be inherited */
+ clone->bi_opf &= ~REQ_DM_POLL_LIST;
+
+ tio = clone_to_tio(clone);
+ tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
}
tio->magic = DM_TIO_MAGIC;
tio->io = ci->io;
tio->ti = ti;
tio->target_bio_nr = target_bio_nr;
+ tio->len_ptr = len;
+ tio->old_sector = 0;
- return tio;
+ if (len) {
+ clone->bi_iter.bi_size = to_bytes(*len);
+ if (bio_integrity(clone))
+ bio_integrity_trim(clone);
+ }
+
+ return clone;
}
-static void free_tio(struct dm_target_io *tio)
+static void free_tio(struct bio *clone)
{
- if (tio->inside_dm_io)
+ if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
return;
- bio_put(&tio->clone);
+ bio_put(clone);
}
/*
@@ -779,71 +847,100 @@ static int __noflush_suspending(struct mapped_device *md)
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
-/*
- * Decrements the number of outstanding ios that a bio has been
- * cloned into, completing the original io if necc.
- */
-void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
+static void dm_io_complete(struct dm_io *io)
{
- unsigned long flags;
blk_status_t io_error;
- struct bio *bio;
struct mapped_device *md = io->md;
- unsigned long start_time = 0;
- struct dm_stats_aux stats_aux;
-
- /* Push-back supersedes any I/O errors */
- if (unlikely(error)) {
- spin_lock_irqsave(&io->endio_lock, flags);
- if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
- io->status = error;
- spin_unlock_irqrestore(&io->endio_lock, flags);
- }
+ struct bio *bio = io->orig_bio;
- if (atomic_dec_and_test(&io->io_count)) {
- bio = io->orig_bio;
- if (io->status == BLK_STS_DM_REQUEUE) {
+ if (io->status == BLK_STS_DM_REQUEUE) {
+ unsigned long flags;
+ /*
+ * Target requested pushing back the I/O.
+ */
+ spin_lock_irqsave(&md->deferred_lock, flags);
+ if (__noflush_suspending(md) &&
+ !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
+ /* NOTE early return due to BLK_STS_DM_REQUEUE below */
+ bio_list_add_head(&md->deferred, bio);
+ } else {
/*
- * Target requested pushing back the I/O.
+ * noflush suspend was interrupted or this is
+ * a write to a zoned target.
*/
- spin_lock_irqsave(&md->deferred_lock, flags);
- if (__noflush_suspending(md) &&
- !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
- /* NOTE early return due to BLK_STS_DM_REQUEUE below */
- bio_list_add_head(&md->deferred, bio);
- } else {
- /*
- * noflush suspend was interrupted or this is
- * a write to a zoned target.
- */
- io->status = BLK_STS_IOERR;
- }
- spin_unlock_irqrestore(&md->deferred_lock, flags);
+ io->status = BLK_STS_IOERR;
}
+ spin_unlock_irqrestore(&md->deferred_lock, flags);
+ }
- io_error = io->status;
- start_time = io->start_time;
- stats_aux = io->stats_aux;
- free_io(md, io);
- end_io_acct(md, bio, start_time, &stats_aux);
+ io_error = io->status;
+ if (dm_io_flagged(io, DM_IO_ACCOUNTED))
+ dm_end_io_acct(io, bio);
+ else if (!io_error) {
+ /*
+ * Must handle target that DM_MAPIO_SUBMITTED only to
+ * then bio_endio() rather than dm_submit_bio_remap()
+ */
+ __dm_start_io_acct(io, bio);
+ dm_end_io_acct(io, bio);
+ }
+ free_io(io);
+ smp_wmb();
+ this_cpu_dec(*md->pending_io);
- if (io_error == BLK_STS_DM_REQUEUE)
- return;
+ /* nudge anyone waiting on suspend queue */
+ if (unlikely(wq_has_sleeper(&md->wait)))
+ wake_up(&md->wait);
- if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
- /*
- * Preflush done for flush with data, reissue
- * without REQ_PREFLUSH.
- */
- bio->bi_opf &= ~REQ_PREFLUSH;
- queue_io(md, bio);
- } else {
- /* done with normal IO or empty flush */
- if (io_error)
- bio->bi_status = io_error;
- bio_endio(bio);
- }
+ if (io_error == BLK_STS_DM_REQUEUE) {
+ /*
+ * Upper layer won't help us poll split bio, io->orig_bio
+ * may only reflect a subset of the pre-split original,
+ * so clear REQ_POLLED in case of requeue
+ */
+ bio->bi_opf &= ~REQ_POLLED;
+ return;
+ }
+
+ if (bio_is_flush_with_data(bio)) {
+ /*
+ * Preflush done for flush with data, reissue
+ * without REQ_PREFLUSH.
+ */
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ queue_io(md, bio);
+ } else {
+ /* done with normal IO or empty flush */
+ if (io_error)
+ bio->bi_status = io_error;
+ bio_endio(bio);
+ }
+}
+
+static inline bool dm_tio_is_normal(struct dm_target_io *tio)
+{
+ return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
+ !dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
+}
+
+/*
+ * Decrements the number of outstanding ios that a bio has been
+ * cloned into, completing the original io if necessary.
+ */
+void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
+{
+ /* Push-back supersedes any I/O errors */
+ if (unlikely(error)) {
+ unsigned long flags;
+ spin_lock_irqsave(&io->lock, flags);
+ if (!(io->status == BLK_STS_DM_REQUEUE &&
+ __noflush_suspending(io->md)))
+ io->status = error;
+ spin_unlock_irqrestore(&io->lock, flags);
}
+
+ if (atomic_dec_and_test(&io->io_count))
+ dm_io_complete(io);
}
void disable_discard(struct mapped_device *md)
@@ -871,7 +968,7 @@ static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
static void clone_endio(struct bio *bio)
{
blk_status_t error = bio->bi_status;
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_target_io *tio = clone_to_tio(bio);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
@@ -919,7 +1016,7 @@ static void clone_endio(struct bio *bio)
up(&md->swap_bios_semaphore);
}
- free_tio(tio);
+ free_tio(bio);
dm_io_dec_pending(io, error);
}
@@ -1046,7 +1143,8 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
- * operations and REQ_OP_ZONE_APPEND (zone append writes).
+ * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
+ * __send_duplicate_bios().
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1074,10 +1172,10 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
*/
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_target_io *tio = clone_to_tio(bio);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
- BUG_ON(bio->bi_opf & REQ_PREFLUSH);
+ BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
BUG_ON(op_is_zone_mgmt(bio_op(bio)));
BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
BUG_ON(bi_size > *tio->len_ptr);
@@ -1088,6 +1186,56 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+static inline void __dm_submit_bio_remap(struct bio *clone,
+ dev_t dev, sector_t old_sector)
+{
+ trace_block_bio_remap(clone, dev, old_sector);
+ submit_bio_noacct(clone);
+}
+
+/*
+ * @clone: clone bio that DM core passed to target's .map function
+ * @tgt_clone: clone of @clone bio that target needs submitted
+ *
+ * Targets should use this interface to submit bios they take
+ * ownership of when returning DM_MAPIO_SUBMITTED.
+ *
+ * Target should also enable ti->accounts_remapped_io
+ */
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
+{
+ struct dm_target_io *tio = clone_to_tio(clone);
+ struct dm_io *io = tio->io;
+
+ WARN_ON_ONCE(!tio->ti->accounts_remapped_io);
+
+ /* establish bio that will get submitted */
+ if (!tgt_clone)
+ tgt_clone = clone;
+
+ /*
+ * Account io->orig_bio to DM dev on behalf of target
+ * that took ownership of IO with DM_MAPIO_SUBMITTED.
+ */
+ if (io->map_task == current) {
+ /* Still in target's map function */
+ dm_io_set_flag(io, DM_IO_START_ACCT);
+ } else {
+ /*
+ * Called by another thread, managed by DM target,
+ * wait for dm_split_and_process_bio() to store
+ * io->orig_bio
+ */
+ while (unlikely(!smp_load_acquire(&io->orig_bio)))
+ msleep(1);
+ dm_start_io_acct(io, clone);
+ }
+
+ __dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk),
+ tio->old_sector);
+}
+EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
+
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
mutex_lock(&md->swap_bios_lock);
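
To make the contract documented above for dm_submit_bio_remap() concrete, a hypothetical bio-based target's .map method could use it roughly as follows. This is an illustrative sketch, not code from the patch: example_target and its ->dev member are invented, and ti->accounts_remapped_io is assumed to have been set in the target's constructor.

#include <linux/device-mapper.h>

struct example_target {
	struct dm_dev *dev;	/* hypothetical underlying device */
};

static int example_target_map(struct dm_target *ti, struct bio *bio)
{
	struct example_target *et = ti->private;

	/* remap the clone handed in by DM core ... */
	bio_set_dev(bio, et->dev->bdev);
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/* ... and submit it on the target's behalf; DM core handles the
	 * remap trace and IO accounting as described in the comment above */
	dm_submit_bio_remap(bio, NULL);
	return DM_MAPIO_SUBMITTED;
}
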
@@ -1104,23 +1252,20 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
mutex_unlock(&md->swap_bios_lock);
}
-static void __map_bio(struct dm_target_io *tio)
+static void __map_bio(struct bio *clone)
{
+ struct dm_target_io *tio = clone_to_tio(clone);
int r;
- sector_t sector;
- struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
clone->bi_end_io = clone_endio;
/*
- * Map the clone. If r == 0 we don't need to do
- * anything, the target has assumed ownership of
- * this io.
+ * Map the clone.
*/
dm_io_inc_pending(io);
- sector = clone->bi_iter.bi_sector;
+ tio->old_sector = clone->bi_iter.bi_sector;
if (unlikely(swap_bios_limit(ti, clone))) {
struct mapped_device *md = io->md;
@@ -1142,27 +1287,28 @@ static void __map_bio(struct dm_target_io *tio)
switch (r) {
case DM_MAPIO_SUBMITTED:
+ /* target has assumed ownership of this io */
+ if (!ti->accounts_remapped_io)
+ dm_io_set_flag(io, DM_IO_START_ACCT);
break;
case DM_MAPIO_REMAPPED:
- /* the bio has been remapped so dispatch it */
- trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
- submit_bio_noacct(clone);
+ /*
+ * the bio has been remapped so dispatch it, but defer
+ * dm_start_io_acct() until after possible bio_split().
+ */
+ __dm_submit_bio_remap(clone, disk_devt(io->md->disk),
+ tio->old_sector);
+ dm_io_set_flag(io, DM_IO_START_ACCT);
break;
case DM_MAPIO_KILL:
- if (unlikely(swap_bios_limit(ti, clone))) {
- struct mapped_device *md = io->md;
- up(&md->swap_bios_semaphore);
- }
- free_tio(tio);
- dm_io_dec_pending(io, BLK_STS_IOERR);
- break;
case DM_MAPIO_REQUEUE:
- if (unlikely(swap_bios_limit(ti, clone))) {
- struct mapped_device *md = io->md;
- up(&md->swap_bios_semaphore);
- }
- free_tio(tio);
- dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
+ if (unlikely(swap_bios_limit(ti, clone)))
+ up(&io->md->swap_bios_semaphore);
+ free_tio(clone);
+ if (r == DM_MAPIO_KILL)
+ dm_io_dec_pending(io, BLK_STS_IOERR);
+ else
+ dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
DMWARN("unimplemented target map return value: %d", r);
@@ -1170,119 +1316,61 @@ static void __map_bio(struct dm_target_io *tio)
}
}
-static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
-{
- bio->bi_iter.bi_sector = sector;
- bio->bi_iter.bi_size = to_bytes(len);
-}
-
-/*
- * Creates a bio that consists of range of complete bvecs.
- */
-static int clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned len)
-{
- struct bio *clone = &tio->clone;
- int r;
-
- __bio_clone_fast(clone, bio);
-
- r = bio_crypt_clone(clone, bio, GFP_NOIO);
- if (r < 0)
- return r;
-
- if (bio_integrity(bio)) {
- if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
- !dm_target_passes_integrity(tio->ti->type))) {
- DMWARN("%s: the target %s doesn't support integrity data.",
- dm_device_name(tio->io->md),
- tio->ti->type->name);
- return -EIO;
- }
-
- r = bio_integrity_clone(clone, bio, GFP_NOIO);
- if (r < 0)
- return r;
- }
-
- bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
- clone->bi_iter.bi_size = to_bytes(len);
-
- if (bio_integrity(bio))
- bio_integrity_trim(clone);
-
- return 0;
-}
-
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
- struct dm_target *ti, unsigned num_bios)
+ struct dm_target *ti, unsigned num_bios,
+ unsigned *len)
{
- struct dm_target_io *tio;
+ struct bio *bio;
int try;
- if (!num_bios)
- return;
-
- if (num_bios == 1) {
- tio = alloc_tio(ci, ti, 0, GFP_NOIO);
- bio_list_add(blist, &tio->clone);
- return;
- }
-
for (try = 0; try < 2; try++) {
int bio_nr;
- struct bio *bio;
if (try)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
- tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
- if (!tio)
+ bio = alloc_tio(ci, ti, bio_nr, len,
+ try ? GFP_NOIO : GFP_NOWAIT);
+ if (!bio)
break;
- bio_list_add(blist, &tio->clone);
+ bio_list_add(blist, bio);
}
if (try)
mutex_unlock(&ci->io->md->table_devices_lock);
if (bio_nr == num_bios)
return;
- while ((bio = bio_list_pop(blist))) {
- tio = container_of(bio, struct dm_target_io, clone);
- free_tio(tio);
- }
+ while ((bio = bio_list_pop(blist)))
+ free_tio(bio);
}
}
-static void __clone_and_map_simple_bio(struct clone_info *ci,
- struct dm_target_io *tio, unsigned *len)
-{
- struct bio *clone = &tio->clone;
-
- tio->len_ptr = len;
-
- __bio_clone_fast(clone, ci->bio);
- if (len)
- bio_setup_sector(clone, ci->sector, *len);
- __map_bio(tio);
-}
-
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios, unsigned *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
- struct bio *bio;
- struct dm_target_io *tio;
-
- alloc_multiple_bios(&blist, ci, ti, num_bios);
+ struct bio *clone;
- while ((bio = bio_list_pop(&blist))) {
- tio = container_of(bio, struct dm_target_io, clone);
- __clone_and_map_simple_bio(ci, tio, len);
+ switch (num_bios) {
+ case 0:
+ break;
+ case 1:
+ clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
+ dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
+ __map_bio(clone);
+ break;
+ default:
+ alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ while ((clone = bio_list_pop(&blist))) {
+ dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
+ __map_bio(clone);
+ }
+ break;
}
}
-static int __send_empty_flush(struct clone_info *ci)
+static void __send_empty_flush(struct clone_info *ci)
{
unsigned target_nr = 0;
struct dm_target *ti;
@@ -1293,63 +1381,34 @@ static int __send_empty_flush(struct clone_info *ci)
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, NULL, 0);
- flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- bio_set_dev(&flush_bio, ci->io->md->disk->part0);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
ci->bio = &flush_bio;
ci->sector_count = 0;
- BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
bio_uninit(ci->bio);
- return 0;
-}
-
-static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned *len)
-{
- struct bio *bio = ci->bio;
- struct dm_target_io *tio;
- int r;
-
- tio = alloc_tio(ci, ti, 0, GFP_NOIO);
- tio->len_ptr = len;
- r = clone_bio(tio, bio, sector, *len);
- if (r < 0) {
- free_tio(tio);
- return r;
- }
- __map_bio(tio);
-
- return 0;
}
-static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
- unsigned num_bios)
+static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
+ unsigned num_bios)
{
unsigned len;
- /*
- * Even though the device advertised support for this type of
- * request, that does not mean every target supports it, and
- * reconfiguration might also have changed that since the
- * check was performed.
- */
- if (!num_bios)
- return -EOPNOTSUPP;
-
len = min_t(sector_t, ci->sector_count,
max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
- __send_duplicate_bios(ci, ti, num_bios, &len);
-
+ /*
+ * dm_accept_partial_bio cannot be used with duplicate bios,
+ * so update clone_info cursor before __send_duplicate_bios().
+ */
ci->sector += len;
ci->sector_count -= len;
- return 0;
+ __send_duplicate_bios(ci, ti, num_bios, &len);
}
static bool is_abnormal_io(struct bio *bio)
@@ -1370,10 +1429,9 @@ static bool is_abnormal_io(struct bio *bio)
static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
int *result)
{
- struct bio *bio = ci->bio;
unsigned num_bios = 0;
- switch (bio_op(bio)) {
+ switch (bio_op(ci->bio)) {
case REQ_OP_DISCARD:
num_bios = ti->num_discard_bios;
break;
@@ -1387,15 +1445,68 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
return false;
}
- *result = __send_changing_extent_only(ci, ti, num_bios);
+ /*
+ * Even though the device advertised support for this type of
+ * request, that does not mean every target supports it, and
+ * reconfiguration might also have changed that since the
+ * check was performed.
+ */
+ if (!num_bios)
+ *result = -EOPNOTSUPP;
+ else {
+ __send_changing_extent_only(ci, ti, num_bios);
+ *result = 0;
+ }
return true;
}
/*
+ * Reuse ->bi_private as hlist head for storing all dm_io instances
+ * associated with this bio, and this bio's bi_private needs to be
+ * stored in dm_io->data before the reuse.
+ *
+ * bio->bi_private is owned by fs or upper layer, so block layer won't
+ * touch it after splitting. Meantime it won't be changed by anyone after
+ * bio is submitted. So this reuse is safe.
+ */
+static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
+{
+ return (struct hlist_head *)&bio->bi_private;
+}
+
+static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
+{
+ struct hlist_head *head = dm_get_bio_hlist_head(bio);
+
+ if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
+ bio->bi_opf |= REQ_DM_POLL_LIST;
+ /*
+ * Save .bi_private into dm_io, so that we can reuse
+ * .bi_private as hlist head for storing dm_io list
+ */
+ io->data = bio->bi_private;
+
+ INIT_HLIST_HEAD(head);
+
+ /* tell block layer to poll for completion */
+ bio->bi_cookie = ~BLK_QC_T_NONE;
+ } else {
+ /*
+ * bio recursed due to split, reuse original poll list,
+ * and save bio->bi_private too.
+ */
+ io->data = hlist_entry(head->first, struct dm_io, node)->data;
+ }
+
+ hlist_add_head(&io->node, head);
+}
+
+/*
* Select the correct strategy for processing a non-flush bio.
*/
-static int __split_and_process_non_flush(struct clone_info *ci)
+static int __split_and_process_bio(struct clone_info *ci)
{
+ struct bio *clone;
struct dm_target *ti;
unsigned len;
int r;
@@ -1407,11 +1518,15 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (__process_abnormal_io(ci, ti, &r))
return r;
- len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
+ /*
+ * Only support bio polling for normal IO, and the target io is
+ * exactly inside the dm_io instance (verified in dm_poll_dm_io)
+ */
+ ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
- r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
- if (r < 0)
- return r;
+ len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
+ clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
+ __map_bio(clone);
ci->sector += len;
ci->sector_count -= len;
@@ -1424,67 +1539,69 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
{
ci->map = map;
ci->io = alloc_io(md, bio);
+ ci->bio = bio;
+ ci->submit_as_polled = false;
ci->sector = bio->bi_iter.bi_sector;
-}
+ ci->sector_count = bio_sectors(bio);
-#define __dm_part_stat_sub(part, field, subnd) \
- (part_stat_get(part, field) -= (subnd))
+ /* Shouldn't happen but sector_count was being set to 0 so... */
+ if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
+ ci->sector_count = 0;
+}
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
-static void __split_and_process_bio(struct mapped_device *md,
- struct dm_table *map, struct bio *bio)
+static void dm_split_and_process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
+ struct bio *orig_bio = NULL;
int error = 0;
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- error = __send_empty_flush(&ci);
- /* dm_io_dec_pending submits any data associated with flush */
- } else if (op_is_zone_mgmt(bio_op(bio))) {
- ci.bio = bio;
- ci.sector_count = 0;
- error = __split_and_process_non_flush(&ci);
- } else {
- ci.bio = bio;
- ci.sector_count = bio_sectors(bio);
- error = __split_and_process_non_flush(&ci);
- if (ci.sector_count && !error) {
- /*
- * Remainder must be passed to submit_bio_noacct()
- * so that it gets handled *after* bios already submitted
- * have been completely processed.
- * We take a clone of the original to store in
- * ci.io->orig_bio to be used by end_io_acct() and
- * for dec_pending to use for completion handling.
- */
- struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
- GFP_NOIO, &md->queue->bio_split);
- ci.io->orig_bio = b;
-
- /*
- * Adjust IO stats for each split, otherwise upon queue
- * reentry there will be redundant IO accounting.
- * NOTE: this is a stop-gap fix, a proper fix involves
- * significant refactoring of DM core's bio splitting
- * (by eliminating DM's splitting and just using bio_split)
- */
- part_stat_lock();
- __dm_part_stat_sub(dm_disk(md)->part0,
- sectors[op_stat_group(bio_op(bio))], ci.sector_count);
- part_stat_unlock();
-
- bio_chain(b, bio);
- trace_block_split(b, bio->bi_iter.bi_sector);
- submit_bio_noacct(bio);
- }
+ __send_empty_flush(&ci);
+ /* dm_io_complete submits any data associated with flush */
+ goto out;
}
- /* drop the extra reference count */
- dm_io_dec_pending(ci.io, errno_to_blk_status(error));
+ error = __split_and_process_bio(&ci);
+ ci.io->map_task = NULL;
+ if (error || !ci.sector_count)
+ goto out;
+
+ /*
+ * Remainder must be passed to submit_bio_noacct() so it gets handled
+ * *after* bios already submitted have been completely processed.
+ * We take a clone of the original to store in ci.io->orig_bio to be
+ * used by dm_end_io_acct() and for dm_io_complete() to use for
+ * completion handling.
+ */
+ orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+ GFP_NOIO, &md->queue->bio_split);
+ bio_chain(orig_bio, bio);
+ trace_block_split(orig_bio, bio->bi_iter.bi_sector);
+ submit_bio_noacct(bio);
+out:
+ if (!orig_bio)
+ orig_bio = bio;
+ smp_store_release(&ci.io->orig_bio, orig_bio);
+ if (dm_io_flagged(ci.io, DM_IO_START_ACCT))
+ dm_start_io_acct(ci.io, NULL);
+
+ /*
+ * Drop the extra reference count for non-POLLED bio, and hold one
+ * reference for POLLED bio, which will be released in dm_poll_bio
+ *
+ * Add every dm_io instance into the hlist_head which is stored in
+ * bio->bi_private, so that dm_poll_bio can poll them all.
+ */
+ if (error || !ci.submit_as_polled)
+ dm_io_dec_pending(ci.io, errno_to_blk_status(error));
+ else
+ dm_queue_poll_io(bio, ci.io);
}
static void dm_submit_bio(struct bio *bio)
@@ -1494,15 +1611,10 @@ static void dm_submit_bio(struct bio *bio)
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
- if (unlikely(!map)) {
- DMERR_LIMIT("%s: mapping table unavailable, erroring io",
- dm_device_name(md));
- bio_io_error(bio);
- goto out;
- }
- /* If suspended, queue this IO for later */
- if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
+ /* If suspended, or map not yet available, queue this IO for later */
+ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
+ unlikely(!map)) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else if (bio->bi_opf & REQ_RAHEAD)
@@ -1519,11 +1631,72 @@ static void dm_submit_bio(struct bio *bio)
if (is_abnormal_io(bio))
blk_queue_split(&bio);
- __split_and_process_bio(md, map, bio);
+ dm_split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx);
}
+static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
+ unsigned int flags)
+{
+ WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
+
+ /* don't poll if the mapped io is done */
+ if (atomic_read(&io->io_count) > 1)
+ bio_poll(&io->tio.clone, iob, flags);
+
+ /* bio_poll holds the last reference */
+ return atomic_read(&io->io_count) == 1;
+}
+
+static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
+ unsigned int flags)
+{
+ struct hlist_head *head = dm_get_bio_hlist_head(bio);
+ struct hlist_head tmp = HLIST_HEAD_INIT;
+ struct hlist_node *next;
+ struct dm_io *io;
+
+ /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
+ if (!(bio->bi_opf & REQ_DM_POLL_LIST))
+ return 0;
+
+ WARN_ON_ONCE(hlist_empty(head));
+
+ hlist_move_list(head, &tmp);
+
+ /*
+ * Restore .bi_private before possibly completing dm_io.
+ *
+ * bio_poll() is only possible once @bio has been completely
+ * submitted via submit_bio_noacct()'s depth-first submission.
+ * So there is no dm_queue_poll_io() race associated with
+ * clearing REQ_DM_POLL_LIST here.
+ */
+ bio->bi_opf &= ~REQ_DM_POLL_LIST;
+ bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+
+ hlist_for_each_entry_safe(io, next, &tmp, node) {
+ if (dm_poll_dm_io(io, iob, flags)) {
+ hlist_del_init(&io->node);
+ /*
+ * clone_endio() has already occurred, so passing
+ * error as 0 here doesn't override io->status
+ */
+ dm_io_dec_pending(io, 0);
+ }
+ }
+
+ /* Not done? */
+ if (!hlist_empty(&tmp)) {
+ bio->bi_opf |= REQ_DM_POLL_LIST;
+ /* Reset bio->bi_private to dm_io list head */
+ hlist_move_list(&tmp, head);
+ return 0;
+ }
+ return 1;
+}
+
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
@@ -1606,6 +1779,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->dax_dev = NULL;
}
+ dm_cleanup_zoned_dev(md);
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -1618,6 +1792,11 @@ static void cleanup_mapped_device(struct mapped_device *md)
blk_cleanup_disk(md->disk);
}
+ if (md->pending_io) {
+ free_percpu(md->pending_io);
+ md->pending_io = NULL;
+ }
+
cleanup_srcu_struct(&md->io_barrier);
mutex_destroy(&md->suspend_lock);
@@ -1626,7 +1805,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
mutex_destroy(&md->swap_bios_lock);
dm_mq_cleanup_mapped_device(md);
- dm_cleanup_zoned_dev(md);
}
/*
@@ -1720,6 +1898,10 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->wq)
goto bad;
+ md->pending_io = alloc_percpu(unsigned long);
+ if (!md->pending_io)
+ goto bad;
+
dm_stats_init(&md->stats);
/* Populate the mapping, nobody knows we exist yet */
@@ -1829,8 +2011,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{
struct dm_table *old_map;
- struct request_queue *q = md->queue;
- bool request_based = dm_table_request_based(t);
sector_t size;
int ret;
@@ -1851,7 +2031,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
dm_table_event_callback(t, event_callback, md);
- if (request_based) {
+ if (dm_table_request_based(t)) {
/*
* Leverage the fact that request-based DM targets are
* immutable singletons - used to optimize dm_mq_queue_rq.
@@ -1865,7 +2045,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
goto out;
}
- ret = dm_table_set_restrictions(t, q, limits);
+ ret = dm_table_set_restrictions(t, md->queue, limits);
if (ret) {
old_map = ERR_PTR(ret);
goto out;
@@ -1877,7 +2057,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (old_map)
dm_sync_table(md);
-
out:
return old_map;
}
@@ -2076,7 +2255,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(md->queue);
+ blk_mark_disk_dead(md->disk);
/*
* Take suspend_lock so that presuspend and postsuspend methods
@@ -2127,16 +2306,13 @@ void dm_put(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_put);
-static bool md_in_flight_bios(struct mapped_device *md)
+static bool dm_in_flight_bios(struct mapped_device *md)
{
int cpu;
- struct block_device *part = dm_disk(md)->part0;
- long sum = 0;
+ unsigned long sum = 0;
- for_each_possible_cpu(cpu) {
- sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
- sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
- }
+ for_each_possible_cpu(cpu)
+ sum += *per_cpu_ptr(md->pending_io, cpu);
return sum != 0;
}
@@ -2149,7 +2325,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
while (true) {
prepare_to_wait(&md->wait, &wait, task_state);
- if (!md_in_flight_bios(md))
+ if (!dm_in_flight_bios(md))
break;
if (signal_pending_state(task_state, current)) {
@@ -2161,6 +2337,8 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
}
finish_wait(&md->wait, &wait);
+ smp_rmb();
+
return r;
}
@@ -2332,11 +2510,11 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
/*
* Here we must make sure that no processes are submitting requests
* to target drivers i.e. no one may be executing
- * __split_and_process_bio from dm_submit_bio.
+ * dm_split_and_process_bio from dm_submit_bio.
*
- * To get all processes out of __split_and_process_bio in dm_submit_bio,
+ * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
* we take the write lock. To prevent any process from reentering
- * __split_and_process_bio from dm_submit_bio and quiesce the thread
+ * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
* (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
* flush_workqueue(md->wq).
*/
@@ -2944,6 +3122,7 @@ static const struct pr_ops dm_pr_ops = {
static const struct block_device_operations dm_blk_dops = {
.submit_bio = dm_submit_bio,
+ .poll_bio = dm_poll_bio,
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
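
Among the dm.c changes above, in-flight accounting moves from the disk's part stats to a dedicated percpu counter: md->pending_io is incremented in alloc_io(), decremented in dm_io_complete() and summed in dm_in_flight_bios(). The scheme stripped of the DM specifics (example_* names are made up):

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct example_counter {
	unsigned long __percpu *pending_io;
};

/* cheap per-CPU increment/decrement on the I/O fast path ... */
static inline void example_io_start(struct example_counter *c)
{
	this_cpu_inc(*c->pending_io);
}

static inline void example_io_end(struct example_counter *c)
{
	this_cpu_dec(*c->pending_io);
}

/* ... and an exact sum only on the slow path that waits for drain */
static bool example_in_flight(struct example_counter *c)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(c->pending_io, cpu);
	return sum != 0;
}
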
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index c0dc6f2ef4a3..50ad818978a4 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -205,9 +205,9 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
}
}
if (failit) {
- struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
- bio_set_dev(b, conf->rdev->bdev);
b->bi_private = bio;
b->bi_end_io = faulty_fail;
bio = b;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index ae0b0a9f2a98..1c6dbf92c136 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -121,11 +121,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- bio_init(&mp_bh->bio, NULL, 0);
- __bio_clone_fast(&mp_bh->bio, bio);
+ bio_init_clone(multipath->rdev->bdev, &mp_bh->bio, bio, GFP_NOIO);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
- bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
@@ -298,7 +296,6 @@ static void multipathd(struct md_thread *thread)
md_check_recovery(mddev);
for (;;) {
- char b[BDEVNAME_SIZE];
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head))
break;
@@ -310,13 +307,13 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
- pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
- bio_devname(bio, b),
+ pr_err("multipath: %pg: unrecoverable IO read error for block %llu\n",
+ bio->bi_bdev,
(unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
} else {
- pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
- bio_devname(bio, b),
+ pr_err("multipath: %pg: redirecting sector %llu to another IO path\n",
+ bio->bi_bdev,
(unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
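
multipath_make_request() above replaces the bio_init() + __bio_clone_fast() pair with bio_init_clone(), which sets up a caller-owned (here: embedded) bio as a clone already bound to the chosen device. A reduced sketch of the same idea; example_bh, example_resubmit and example_end_request are hypothetical:

#include <linux/bio.h>

struct example_bh {
	struct bio bio;			/* embedded, storage owned by caller */
	struct bio *master_bio;
};

static void example_end_request(struct bio *bio)
{
	/* hypothetical completion: just finish the master bio */
	struct example_bh *bh = bio->bi_private;

	bio_endio(bh->master_bio);
}

static void example_resubmit(struct example_bh *bh,
			     struct block_device *bdev, sector_t data_offset)
{
	/* initialize the embedded bio as a clone of the master on @bdev */
	bio_init_clone(bdev, &bh->bio, bh->master_bio, GFP_NOIO);
	bh->bio.bi_iter.bi_sector += data_offset;
	bh->bio.bi_end_io = example_end_request;
	bh->bio.bi_private = bh;
	submit_bio_noacct(&bh->bio);
}
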
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5881d05a76eb..309b3af906ad 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -562,11 +562,11 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
+ bi = bio_alloc_bioset(rdev->bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &mddev->bio_set);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
- bio_set_dev(bi, rdev->bdev);
- bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
rcu_read_lock();
@@ -955,7 +955,6 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
* If an error occurred, call md_error
*/
struct bio *bio;
- int ff = 0;
if (!page)
return;
@@ -963,11 +962,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(Faulty, &rdev->flags))
return;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
+ bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
+ 1,
+ REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
+ GFP_NOIO, &mddev->sync_set);
atomic_inc(&rdev->nr_pending);
- bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
@@ -976,8 +977,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
test_bit(FailFast, &rdev->flags) &&
!test_bit(LastDev, &rdev->flags))
- ff = MD_FAILFAST;
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
+ bio->bi_opf |= MD_FAILFAST;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
@@ -998,13 +998,11 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio bio;
struct bio_vec bvec;
- bio_init(&bio, &bvec, 1);
-
if (metadata_op && rdev->meta_bdev)
- bio_set_dev(&bio, rdev->meta_bdev);
+ bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
else
- bio_set_dev(&bio, rdev->bdev);
- bio.bi_opf = op | op_flags;
+ bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
+
if (metadata_op)
bio.bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
@@ -5869,10 +5867,6 @@ int md_run(struct mddev *mddev)
nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
}
- /* Set the NOWAIT flags if all underlying devices support it */
- if (nowait)
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
-
if (!bioset_initialized(&mddev->bio_set)) {
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
@@ -6010,6 +6004,10 @@ int md_run(struct mddev *mddev)
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+
+ /* Set the NOWAIT flags if all underlying devices support it */
+ if (nowait)
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -8636,13 +8634,14 @@ static void md_end_io_acct(struct bio *bio)
*/
void md_account_bio(struct mddev *mddev, struct bio **bio)
{
+ struct block_device *bdev = (*bio)->bi_bdev;
struct md_io_acct *md_io_acct;
struct bio *clone;
- if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
+ if (!blk_queue_io_stat(bdev->bd_disk->queue))
return;
- clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
+ clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
md_io_acct->orig_bio = *bio;
md_io_acct->start_time = bio_start_io_acct(*bio);
@@ -9583,7 +9582,7 @@ static int md_notify_reboot(struct notifier_block *this,
* driver, we do want to have a safe RAID driver ...
*/
if (need_delay)
- mdelay(1000*1);
+ msleep(1000);
return NOTIFY_DONE;
}
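
The sync_page_io() hunk above now hands the device and opf straight to bio_init() for its on-stack bio; the pattern in isolation (example_sync_rw is an invented wrapper):

#include <linux/bio.h>

static bool example_sync_rw(struct block_device *bdev, sector_t sector,
			    struct page *page, int size, unsigned int opf)
{
	struct bio bio;
	struct bio_vec bvec;

	/* on-stack bio: device, one-entry bvec table and opf in one call */
	bio_init(&bio, bdev, &bvec, 1, opf);
	bio.bi_iter.bi_sector = sector;
	bio_add_page(&bio, page, size, 0);
	submit_bio_wait(&bio);
	return !bio.bi_status;
}
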
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 83f9a4f3d82e..e61f6cad4e08 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -28,6 +28,11 @@ struct resync_pages {
struct page *pages[RESYNC_PAGES];
};
+struct raid1_plug_cb {
+ struct blk_plug_cb cb;
+ struct bio_list pending;
+};
+
static void rbio_pool_free(void *rbio, void *data)
{
kfree(rbio);
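
raid1_plug_cb now lives here so that raid1 and raid10 can share it (raid10's private raid10_plug_cb and both pending_cnt counters go away below). For reference, this is how a write path batches bios onto the plug via blk_check_plugged(), condensed from the raid1/raid10 hunks that follow; example_queue_write is an invented wrapper written as if inside raid1.c, with raid1_unplug being that file's existing callback:

static void example_queue_write(struct mddev *mddev, struct r1conf *conf,
				struct bio *mbio)
{
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug;

	cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
	if (cb) {
		/* batch on the current task's plug; raid1_unplug() flushes */
		plug = container_of(cb, struct raid1_plug_cb, cb);
		bio_list_add(&plug->pending, mbio);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		md_wakeup_thread(mddev->thread);
	}
}
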
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 203ec58f0d3e..800a17aad6e6 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -824,7 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
/*
@@ -1126,7 +1125,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
+ behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
+ &r1_bio->mddev->bio_set);
if (!behind_bio)
return;
@@ -1166,12 +1166,6 @@ free_pages:
bio_put(behind_bio);
}
-struct raid1_plug_cb {
- struct blk_plug_cb cb;
- struct bio_list pending;
- int pending_cnt;
-};
-
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
@@ -1183,7 +1177,6 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
- conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
@@ -1319,13 +1312,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
r1_bio->start_time = bio_start_io_acct(bio);
- read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+ read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
+ &mddev->bio_set);
r1_bio->bios[rdisk] = read_bio;
read_bio->bi_iter.bi_sector = r1_bio->sector +
mirror->rdev->data_offset;
- bio_set_dev(read_bio, mirror->rdev->bdev);
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &mirror->rdev->flags) &&
@@ -1545,24 +1538,25 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
first_clone = 0;
}
- if (r1_bio->behind_master_bio)
- mbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO, &mddev->bio_set);
- else
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
-
if (r1_bio->behind_master_bio) {
+ mbio = bio_alloc_clone(rdev->bdev,
+ r1_bio->behind_master_bio,
+ GFP_NOIO, &mddev->bio_set);
if (test_bit(CollisionCheck, &rdev->flags))
wait_for_serialization(rdev, r1_bio);
if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
- } else if (mddev->serialize_policy)
- wait_for_serialization(rdev, r1_bio);
+ } else {
+ mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
+
+ if (mddev->serialize_policy)
+ wait_for_serialization(rdev, r1_bio);
+ }
r1_bio->bios[i] = mbio;
mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
- bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
if (test_bit(FailFast, &rdev->flags) &&
@@ -1586,11 +1580,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
plug = NULL;
if (plug) {
bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
} else {
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread);
}
@@ -2070,15 +2062,14 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
} while (!success && d != r1_bio->read_disk);
if (!success) {
- char b[BDEVNAME_SIZE];
int abort = 0;
/* Cannot read from anywhere, this block is lost.
* Record a bad block on each device. If that doesn't
* work just disable and interrupt the recovery.
* Don't fail devices as that won't really help.
*/
- pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
- mdname(mddev), bio_devname(bio, b),
+ pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), bio->bi_bdev,
(unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
@@ -2165,11 +2156,10 @@ static void process_checks(struct r1bio *r1_bio)
continue;
/* fixup the bio for reuse, but preserve errno */
status = b->bi_status;
- bio_reset(b);
+ bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
b->bi_status = status;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
- bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio;
b->bi_private = rp;
@@ -2416,12 +2406,12 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
/* Write at 'sector' for 'sectors'*/
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- wbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO,
- &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev,
+ r1_bio->behind_master_bio,
+ GFP_NOIO, &mddev->bio_set);
} else {
- wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
- &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
+ GFP_NOIO, &mddev->bio_set);
}
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
@@ -2430,7 +2420,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
- bio_set_dev(wbio, rdev->bdev);
if (submit_bio_wait(wbio) < 0)
/* failure! */
@@ -2650,7 +2639,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
for (i = conf->poolinfo->raid_disks; i--; ) {
bio = r1bio->bios[i];
rps = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rps;
}
r1bio->master_bio = NULL;
@@ -3058,7 +3047,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list);
- conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
err = -EIO;
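
The raid1.c hunks above also show both flavours of the updated bio_reset(): process_checks() rebinds the bio to a device and REQ_OP_READ in the reset call, while raid1_alloc_init_r1buf() passes (NULL, 0) when the caller fills those in later. A tiny sketch of the former (example_reuse is invented):

#include <linux/bio.h>

static void example_reuse(struct bio *bio, struct block_device *bdev,
			  sector_t sector, void *private)
{
	/* wipe the bio but rebind device and operation in the same call */
	bio_reset(bio, bdev, REQ_OP_READ);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = private;	/* anything to keep must be restored */
}
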
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index ccf10e59b116..ebb6788820e7 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -87,7 +87,6 @@ struct r1conf {
/* queue pending writes to be submitted on unplug */
struct bio_list pending_bio_list;
- int pending_count;
/* for use when syncing mirrors:
* We don't allow both normal IO and resync/recovery IO at
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4086273497c6..dfe7d62d3fbd 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -861,7 +861,6 @@ static void flush_pending_writes(struct r10conf *conf)
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
/*
@@ -1054,16 +1053,9 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
return rdev->new_data_offset;
}
-struct raid10_plug_cb {
- struct blk_plug_cb cb;
- struct bio_list pending;
- int pending_cnt;
-};
-
static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
- struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
- cb);
+ struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
struct mddev *mddev = plug->cb.data;
struct r10conf *conf = mddev->private;
struct bio *bio;
@@ -1071,7 +1063,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
- conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
@@ -1208,14 +1199,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
r10_bio->start_time = bio_start_io_acct(bio);
- read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+ read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
- bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &rdev->flags) &&
@@ -1239,7 +1229,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct blk_plug_cb *cb;
- struct raid10_plug_cb *plug = NULL;
+ struct raid1_plug_cb *plug = NULL;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev;
int devnum = r10_bio->devs[n_copy].devnum;
@@ -1255,7 +1245,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
} else
rdev = conf->mirrors[devnum].rdev;
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
if (replacement)
r10_bio->devs[n_copy].repl_bio = mbio;
else
@@ -1263,7 +1253,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
choose_data_offset(r10_bio, rdev));
- bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
if (!replacement && test_bit(FailFast,
@@ -1282,16 +1271,14 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
if (cb)
- plug = container_of(cb, struct raid10_plug_cb, cb);
+ plug = container_of(cb, struct raid1_plug_cb, cb);
else
plug = NULL;
if (plug) {
bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
} else {
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread);
}
@@ -1812,7 +1799,8 @@ retry_discard:
*/
if (r10_bio->devs[disk].bio) {
struct md_rdev *rdev = conf->mirrors[disk].rdev;
- mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
mbio->bi_end_io = raid10_end_discard_request;
mbio->bi_private = r10_bio;
r10_bio->devs[disk].bio = mbio;
@@ -1825,7 +1813,8 @@ retry_discard:
}
if (r10_bio->devs[disk].repl_bio) {
struct md_rdev *rrdev = conf->mirrors[disk].replacement;
- rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
rbio->bi_end_io = raid10_end_discard_request;
rbio->bi_private = r10_bio;
r10_bio->devs[disk].repl_bio = rbio;
@@ -2422,7 +2411,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* bi_vecs, as the read request might have corrupted these
*/
rp = get_resync_pages(tbio);
- bio_reset(tbio);
+ bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
@@ -2430,7 +2419,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
tbio->bi_end_io = end_sync_write;
- bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
bio_copy_data(tbio, fbio);
@@ -2441,7 +2429,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
- bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
submit_bio_noacct(tbio);
}
@@ -2894,12 +2881,12 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
- wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
wbio->bi_iter.bi_sector = wsector +
choose_data_offset(r10_bio, rdev);
- bio_set_dev(wbio, rdev->bdev);
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (submit_bio_wait(wbio) < 0)
@@ -3160,12 +3147,12 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
for (i = 0; i < nalloc; i++) {
bio = r10bio->devs[i].bio;
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
bio = r10bio->devs[i].repl_bio;
if (bio) {
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
}
}
@@ -4891,14 +4878,12 @@ read_more:
return sectors_done;
}
- read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set);
-
- bio_set_dev(read_bio, rdev->bdev);
+ read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
+ GFP_KERNEL, &mddev->bio_set);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_reshape_read;
- bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index c34bb196790e..5c0804d8bb1f 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -75,7 +75,6 @@ struct r10conf {
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
- int pending_count;
spinlock_t resync_lock;
atomic_t nr_pending;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0b5dcaabbc15..a7d50ff9020a 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -735,10 +735,9 @@ static void r5l_submit_current_io(struct r5l_log *log)
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs);
+ struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
+ REQ_OP_WRITE, GFP_NOIO, &log->bs);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
return bio;
@@ -1267,6 +1266,8 @@ static void r5l_log_flush_endio(struct bio *bio)
r5l_io_run_stripes(io);
list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ bio_uninit(bio);
}
/*
@@ -1302,10 +1303,9 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
if (!do_flush)
return;
- bio_reset(&log->flush_bio);
- bio_set_dev(&log->flush_bio, log->rdev->bdev);
+ bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH);
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
}
@@ -1623,10 +1623,10 @@ struct r5l_recovery_ctx {
* just copy data from the pool.
*/
struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+ struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
sector_t pool_offset; /* offset of first page in the pool */
int total_pages; /* total allocated pages */
int valid_pages; /* pages with valid data */
- struct bio *ra_bio; /* bio to do the read ahead */
};
static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
@@ -1634,10 +1634,6 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
{
struct page *page;
- ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs);
- if (!ctx->ra_bio)
- return -ENOMEM;
-
ctx->valid_pages = 0;
ctx->total_pages = 0;
while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
@@ -1649,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
ctx->total_pages += 1;
}
- if (ctx->total_pages == 0) {
- bio_put(ctx->ra_bio);
+ if (ctx->total_pages == 0)
return -ENOMEM;
- }
ctx->pool_offset = 0;
return 0;
@@ -1665,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,
for (i = 0; i < ctx->total_pages; ++i)
put_page(ctx->ra_pool[i]);
- bio_put(ctx->ra_bio);
}
/*
@@ -1678,17 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
struct r5l_recovery_ctx *ctx,
sector_t offset)
{
- bio_reset(ctx->ra_bio);
- bio_set_dev(ctx->ra_bio, log->rdev->bdev);
- bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
- ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+ struct bio bio;
+ int ret;
+
+ bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
+ R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
+ bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
ctx->valid_pages = 0;
ctx->pool_offset = offset;
while (ctx->valid_pages < ctx->total_pages) {
- bio_add_page(ctx->ra_bio,
- ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+ __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
+ 0);
ctx->valid_pages += 1;
offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
@@ -1697,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
break;
}
- return submit_bio_wait(ctx->ra_bio);
+ ret = submit_bio_wait(&bio);
+ bio_uninit(&bio);
+ return ret;
}
/*
@@ -3108,7 +3105,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->io_end_ios);
INIT_LIST_HEAD(&log->flushing_ios);
INIT_LIST_HEAD(&log->finished_ios);
- bio_init(&log->flush_bio, NULL, 0);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
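The r5l_recovery_fetch_ra_pool() change above drops the long-lived ra_bio in favour of an on-stack bio built around a caller-owned bio_vec table. The general pattern, sketched with hypothetical names and trimmed error handling:

	#include <linux/bio.h>

	/* Synchronously read nr pages starting at sector: bio_init() binds the
	 * device, operation and bvec table, submit_bio_wait() does the I/O,
	 * and bio_uninit() releases whatever bio_init() took once done.
	 */
	static int read_pages_sync(struct block_device *bdev, sector_t sector,
				   struct page **pages, int nr,
				   struct bio_vec *bvecs)
	{
		struct bio bio;
		int i, ret;

		bio_init(&bio, bdev, bvecs, nr, REQ_OP_READ);
		bio.bi_iter.bi_sector = sector;
		for (i = 0; i < nr; i++)
			__bio_add_page(&bio, pages[i], PAGE_SIZE, 0);
		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);
		return ret;
	}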
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 4ab417915d7f..ea4cd8dd4dc3 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -250,7 +250,8 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
atomic_set(&io->pending_flushes, 0);
- bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
+ bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
+ REQ_OP_WRITE | REQ_FUA);
pplhdr = page_address(io->header_page);
clear_page(pplhdr);
@@ -416,12 +417,10 @@ static void ppl_log_endio(struct bio *bio)
static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
- char b[BDEVNAME_SIZE];
-
- pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
+ pr_debug("%s: seq: %llu size: %u sector: %llu dev: %pg\n",
__func__, io->seq, bio->bi_iter.bi_size,
(unsigned long long)bio->bi_iter.bi_sector,
- bio_devname(bio, b));
+ bio->bi_bdev);
submit_bio(bio);
}
@@ -465,8 +464,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio->bi_end_io = ppl_log_endio;
- bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->next_io_sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
bio->bi_write_hint = ppl_conf->write_hint;
@@ -496,11 +493,10 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
struct bio *prev = bio;
- bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS,
+ bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
+ prev->bi_opf, GFP_NOIO,
&ppl_conf->bs);
- bio->bi_opf = prev->bi_opf;
bio->bi_write_hint = prev->bi_write_hint;
- bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
@@ -590,9 +586,8 @@ static void ppl_flush_endio(struct bio *bio)
struct ppl_log *log = io->log;
struct ppl_conf *ppl_conf = log->ppl_conf;
struct r5conf *conf = ppl_conf->mddev->private;
- char b[BDEVNAME_SIZE];
- pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
+ pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);
if (bio->bi_status) {
struct md_rdev *rdev;
@@ -635,16 +630,14 @@ static void ppl_do_flush(struct ppl_io_unit *io)
if (bdev) {
struct bio *bio;
- char b[BDEVNAME_SIZE];
- bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc_bioset(bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &ppl_conf->flush_bs);
bio->bi_private = io;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio->bi_end_io = ppl_flush_endio;
- pr_debug("%s: dev: %s\n", __func__,
- bio_devname(bio, b));
+ pr_debug("%s: dev: %ps\n", __func__, bio->bi_bdev);
submit_bio(bio);
flushed_disks++;
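For the flush path above, note the post-conversion allocator argument order: block device, vector count and operation come before the GFP flags and the bio_set. A sketch of a zero-segment preflush write against a member device, with a hypothetical helper name (not part of this patch):

	#include <linux/bio.h>

	/* Allocate and submit an empty REQ_PREFLUSH write to bdev. */
	static void issue_member_flush(struct block_device *bdev,
				       struct bio_set *bs,
				       bio_end_io_t *end_io, void *priv)
	{
		struct bio *bio = bio_alloc_bioset(bdev, 0,
						   REQ_OP_WRITE | REQ_PREFLUSH,
						   GFP_NOIO, bs);

		bio->bi_private = priv;
		bio->bi_end_io = end_io;
		submit_bio(bio);
	}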
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 06025f7177e3..afecb1c765bf 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1060,6 +1060,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
int i, disks = sh->disks;
struct stripe_head *head_sh = sh;
struct bio_list pending_bios = BIO_EMPTY_LIST;
+ struct r5dev *dev;
bool should_defer;
might_sleep();
@@ -1094,8 +1095,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
op_flags |= REQ_SYNC;
again:
- bi = &sh->dev[i].req;
- rbi = &sh->dev[i].rreq; /* For writing to replacement */
+ dev = &sh->dev[i];
+ bi = &dev->req;
+ rbi = &dev->rreq; /* For writing to replacement */
rcu_read_lock();
rrdev = rcu_dereference(conf->disks[i].replacement);
@@ -1171,8 +1173,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_set_dev(bi, rdev->bdev);
- bio_set_op_attrs(bi, op, op_flags);
+ bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
bi->bi_end_io = op_is_write(op)
? raid5_end_write_request
: raid5_end_read_request;
@@ -1238,8 +1239,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_set_dev(rbi, rrdev->bdev);
- bio_set_op_attrs(rbi, op, op_flags);
+ bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);
BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request;
rbi->bi_private = sh;
@@ -2294,7 +2294,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
int disks, struct r5conf *conf)
{
struct stripe_head *sh;
- int i;
sh = kmem_cache_zalloc(sc, gfp);
if (sh) {
@@ -2307,12 +2306,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
atomic_set(&sh->count, 1);
sh->raid_conf = conf;
sh->log_start = MaxSector;
- for (i = 0; i < disks; i++) {
- struct r5dev *dev = &sh->dev[i];
-
- bio_init(&dev->req, &dev->vec, 1);
- bio_init(&dev->rreq, &dev->rvec, 1);
- }
if (raid5_has_ppl(conf)) {
sh->ppl_page = alloc_page(gfp);
@@ -2677,7 +2670,6 @@ static void raid5_end_read_request(struct bio * bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
BUG();
return;
}
@@ -2785,7 +2777,7 @@ static void raid5_end_read_request(struct bio * bi)
}
}
rdev_dec_pending(rdev, conf->mddev);
- bio_reset(bi);
+ bio_uninit(bi);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -2823,7 +2815,6 @@ static void raid5_end_write_request(struct bio *bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
BUG();
return;
}
@@ -2860,7 +2851,7 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && bi->bi_status && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
- bio_reset(bi);
+ bio_uninit(bi);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -5438,14 +5429,14 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
return 0;
}
- align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
+ align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO,
+ &mddev->io_acct_set);
md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
raid_bio->bi_next = (void *)rdev;
if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue))
md_io_acct->start_time = bio_start_io_acct(raid_bio);
md_io_acct->orig_bio = raid_bio;
- bio_set_dev(align_bio, rdev->bdev);
align_bio->bi_end_io = raid5_align_endio;
align_bio->bi_private = md_io_acct;
align_bio->bi_iter.bi_sector = sector;