author     Linus Torvalds <torvalds@linux-foundation.org>  2016-08-08 02:38:45 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-08-08 02:38:45 +0300
commit     857953d72f3744f325de93320cc2673795e9ca89 (patch)
tree       b4253224fd74148708bd1fb210e4155e22ee7b63 /drivers
parent     635a4ba111e3bd0169fd549b24fe108b1f171713 (diff)
parent     1eff9d322a444245c67515edb52bc0eb68374aa8 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull more block fixes from Jens Axboe:
 "As mentioned in the pull the other day, a few more fixes for this
  round, all related to the bio op changes in this series.

  Two fixes, and then a cleanup, renaming bio->bi_rw to bio->bi_opf.  I
  wanted to do that change right after or right before -rc1, so that
  risk of conflict was reduced.  I just rebased the series on top of
  current master, and no new ->bi_rw usage has snuck in"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: rename bio bi_rw to bi_opf
  target: iblock_execute_sync_cache() should use bio_set_op_attrs()
  mm: make __swap_writepage() use bio_set_op_attrs()
  block/mm: make bdev_ops->rw_page() take a bool for read/write
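The rename itself is mechanical: every flag test that used to read bio->bi_rw now reads bio->bi_opf, and drivers stop assigning the field directly in favour of bio_set_op_attrs(), which records the REQ_OP_* value together with the op flags. A minimal sketch of the pattern, assuming the 4.8-era block helpers; handle_flush() is a hypothetical caller-side function, not from this diff:

    /* Sketch only, not taken from this diff. */
    static void example_submit(struct bio *bio)
    {
            /* Checking op flags: same REQ_* bits, renamed field
             * (bi_rw becomes bi_opf). */
            if (bio->bi_opf & REQ_PREFLUSH)
                    handle_flush(bio);

            /* Setting the op and flags: use the helper rather than
             * assigning the raw field. */
            bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
            submit_bio(bio);
    }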
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/brd.c                  | 16
-rw-r--r--  drivers/block/drbd/drbd_main.c       |  8
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   |  2
-rw-r--r--  drivers/block/drbd/drbd_req.c        |  6
-rw-r--r--  drivers/block/drbd/drbd_worker.c     |  2
-rw-r--r--  drivers/block/pktcdvd.c              |  2
-rw-r--r--  drivers/block/umem.c                 |  2
-rw-r--r--  drivers/block/zram/zram_drv.c        | 23
-rw-r--r--  drivers/md/bcache/request.c          | 12
-rw-r--r--  drivers/md/bcache/super.c            |  2
-rw-r--r--  drivers/md/bcache/writeback.h        |  2
-rw-r--r--  drivers/md/dm-cache-target.c         |  8
-rw-r--r--  drivers/md/dm-crypt.c                |  4
-rw-r--r--  drivers/md/dm-era-target.c           |  2
-rw-r--r--  drivers/md/dm-flakey.c               |  6
-rw-r--r--  drivers/md/dm-io.c                   |  6
-rw-r--r--  drivers/md/dm-log-writes.c           |  4
-rw-r--r--  drivers/md/dm-mpath.c                |  2
-rw-r--r--  drivers/md/dm-raid1.c                | 10
-rw-r--r--  drivers/md/dm-region-hash.c          |  4
-rw-r--r--  drivers/md/dm-snap.c                 |  6
-rw-r--r--  drivers/md/dm-stripe.c               |  4
-rw-r--r--  drivers/md/dm-thin.c                 |  8
-rw-r--r--  drivers/md/dm-zero.c                 |  2
-rw-r--r--  drivers/md/dm.c                      | 10
-rw-r--r--  drivers/md/linear.c                  |  2
-rw-r--r--  drivers/md/md.c                      |  4
-rw-r--r--  drivers/md/multipath.c               |  8
-rw-r--r--  drivers/md/raid0.c                   |  2
-rw-r--r--  drivers/md/raid1.c                   |  6
-rw-r--r--  drivers/md/raid10.c                  |  8
-rw-r--r--  drivers/md/raid5-cache.c             |  2
-rw-r--r--  drivers/md/raid5.c                   | 20
-rw-r--r--  drivers/nvdimm/btt.c                 | 12
-rw-r--r--  drivers/nvdimm/pmem.c                | 16
-rw-r--r--  drivers/target/target_core_iblock.c  |  2
36 files changed, 118 insertions, 117 deletions
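The bdev_ops->rw_page() change gives that hook (and the page_endio() completion helper) a bool for the data direction in place of an op value; the brd, zram, btt and pmem hunks below all take the same shape. A hedged driver-side sketch of the new signature, with the mydev_* names invented for illustration:

    static int mydev_rw_page(struct block_device *bdev, sector_t sector,
                             struct page *page, bool is_write)
    {
            struct mydev *dev = bdev->bd_disk->private_data;
            /* mydev_do_page() is a hypothetical per-driver worker,
             * standing in for brd_do_bvec()/pmem_do_bvec() below. */
            int err = mydev_do_page(dev, page, is_write, sector);

            /* page_endio() now also takes the bool direction. */
            page_endio(page, is_write, err);
            return err;
    }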
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3439b28cce8b..0c76d4016eeb 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, int op,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
void *mem;
int err = 0;
- if (op_is_write(op)) {
+ if (is_write) {
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (!op_is_write(op)) {
+ if (!is_write) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -350,8 +350,8 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
unsigned int len = bvec.bv_len;
int err;
- err = brd_do_bvec(brd, bvec.bv_page, len,
- bvec.bv_offset, bio_op(bio), sector);
+ err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(bio_op(bio)), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
@@ -366,11 +366,11 @@ io_error:
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int op)
+ struct page *page, bool is_write)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
- page_endio(page, op, err);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, err);
return err;
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0501ae0c517b..100be556e613 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
struct bio *bio)
{
if (connection->agreed_pro_version >= 95)
- return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
- (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
- (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+ return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
+ (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
+ (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
else
- return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+ return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_DISCARD requests
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index df45713dfbe8..942384f34e22 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
* drbd_submit_peer_request()
* @device: DRBD device.
* @peer_req: peer request
- * @rw: flag field, see bio->bi_rw
+ * @rw: flag field, see bio->bi_opf
*
* May spread the pages to multiple bios,
* depending on bio_add_page restrictions.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 66b8e4bb74d8..de279fe4e4fd 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
*/
if (!ok &&
bio_op(req->master_bio) == REQ_OP_READ &&
- !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+ !(req->master_bio->bi_opf & REQ_RAHEAD) &&
!list_empty(&req->tl_requests))
req->rq_state |= RQ_POSTPONED;
@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
* replicating, in which case there is no point. */
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
+ D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
return remote;
@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
if (bio_op(bio) != REQ_OP_READ)
type = DRBD_FAULT_DT_WR;
- else if (bio->bi_rw & REQ_RAHEAD)
+ else if (bio->bi_opf & REQ_RAHEAD)
type = DRBD_FAULT_DT_RA;
else
type = DRBD_FAULT_DT_RD;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 35dbb3dca47e..c6755c9a0aea 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
what = DISCARD_COMPLETED_WITH_ERROR;
break;
case REQ_OP_READ:
- if (bio->bi_rw & REQ_RAHEAD)
+ if (bio->bi_opf & REQ_RAHEAD)
what = READ_AHEAD_COMPLETED_WITH_ERROR;
else
what = READ_COMPLETED_WITH_ERROR;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9393bc730acf..90fa4ac149db 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev;
- pkt->bio->bi_rw = REQ_WRITE;
+ bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt->bio->bi_vcnt = pkt->frames;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index d0a3e6d4515f..be90e15854ed 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+ if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index ca29649c4b08..04365b17ee67 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -843,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, int op)
+ int offset, bool is_write)
{
unsigned long start_time = jiffies;
+ int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
int ret;
- generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
- if (!op_is_write(op)) {
+ if (!is_write) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset);
} else {
@@ -859,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset);
}
- generic_end_io_acct(op, &zram->disk->part0, start_time);
+ generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
if (unlikely(ret)) {
- if (!op_is_write(op))
+ if (!is_write)
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
@@ -903,17 +904,17 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_offset = bvec.bv_offset;
if (zram_bvec_rw(zram, &bv, index, offset,
- bio_op(bio)) < 0)
+ op_is_write(bio_op(bio))) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
if (zram_bvec_rw(zram, &bv, index + 1, 0,
- bio_op(bio)) < 0)
+ op_is_write(bio_op(bio))) < 0)
goto out;
} else
if (zram_bvec_rw(zram, &bvec, index, offset,
- bio_op(bio)) < 0)
+ op_is_write(bio_op(bio))) < 0)
goto out;
update_position(&index, &offset, &bvec);
@@ -970,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int op)
+ struct page *page, bool is_write)
{
int offset, err = -EIO;
u32 index;
@@ -994,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, op);
+ err = zram_bvec_rw(zram, &bv, index, offset, is_write);
put_zram:
zram_meta_put(zram);
out:
@@ -1007,7 +1008,7 @@ out:
* (e.g., SetPageError, set_page_dirty and extra works).
*/
if (err == 0)
- page_endio(page, op, 0);
+ page_endio(page, is_write, 0);
return err;
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 69f16f43f8ab..4b177fe11ebb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
* Journal writes are marked REQ_PREFLUSH; if the original write was a
* flush, it'll wait on the journal write.
*/
- bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
+ bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
do {
unsigned i;
@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (!congested &&
mode == CACHE_MODE_WRITEBACK &&
op_is_write(bio_op(bio)) &&
- (bio->bi_rw & REQ_SYNC))
+ (bio->bi_opf & REQ_SYNC))
goto rescale;
spin_lock(&dc->io_lock);
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.write_prio = 0;
s->iop.error = 0;
s->iop.flags = 0;
- s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
+ s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
s->iop.wq = bcache_wq;
return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
}
- if (!(bio->bi_rw & REQ_RAHEAD) &&
- !(bio->bi_rw & REQ_META) &&
+ if (!(bio->bi_opf & REQ_RAHEAD) &&
+ !(bio->bi_opf & REQ_META) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
bch_writeback_add(dc);
s->iop.bio = bio;
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
/* Also need to send a flush to the backing device */
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
dc->disk.bio_split);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 88ef6d14cce3..95a4ca6ce6ff 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
for (i = 0; i < KEY_PTRS(k); i++) {
struct bio *bio = bch_bbio_alloc(c);
- bio->bi_rw = REQ_SYNC|REQ_META|op_flags;
+ bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042aed24..301eaf565167 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,7 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return bio->bi_rw & REQ_SYNC ||
+ return bio->bi_opf & REQ_SYNC ||
in_use <= CUTOFF_WRITEBACK;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 718744db62df..59b2c50562e4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -788,7 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio &&
- !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
+ !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
bio_op(bio) != REQ_OP_DISCARD) {
pb->tick = true;
cache->need_tick_bio = false;
@@ -830,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
- return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+ return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
}
/*
@@ -1069,7 +1069,7 @@ static void dec_io_migrations(struct cache *cache)
static bool discard_or_flush(struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
- bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+ bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1980,7 +1980,7 @@ static void process_deferred_bios(struct cache *cache)
bio = bio_list_pop(&bios);
- if (bio->bi_rw & REQ_PREFLUSH)
+ if (bio->bi_opf & REQ_PREFLUSH)
process_flush_bio(cache, bio);
else if (bio_op(bio) == REQ_OP_DISCARD)
process_discard_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8f2e3e2ffd26..4e9784b4e0ac 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
+ bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1915,7 +1915,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
* - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
* - for REQ_OP_DISCARD caller must use flush if IO ordering matters
*/
- if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
bio_op(bio) == REQ_OP_DISCARD)) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2faf49d8f4d7..bf2b2676cb8a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1542,7 +1542,7 @@ static int era_map(struct dm_target *ti, struct bio *bio)
/*
* REQ_PREFLUSH bios carry no data, so we're not interested in them.
*/
- if (!(bio->bi_rw & REQ_PREFLUSH) &&
+ if (!(bio->bi_opf & REQ_PREFLUSH) &&
(bio_data_dir(bio) == WRITE) &&
!metadata_current_marked(era->md, block)) {
defer_bio(era, bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 19db13e99466..97e446d54a15 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -16,7 +16,7 @@
#define DM_MSG_PREFIX "flakey"
#define all_corrupt_bio_flags_match(bio, fc) \
- (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+ (((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
/*
* Flakey: Used for testing only, simulates intermittent,
@@ -266,9 +266,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
- "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
+ "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
}
}
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index daa03e41654a..0bf1a12e35fe 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -505,9 +505,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
* New collapsed (a)synchronous interface.
*
* If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
- * If you fail to do one of these, the IO will be submitted to the disk after
- * q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
*/
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index b5dbf7a0515e..4ab68033f9d1 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -555,8 +555,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
struct bio_vec bv;
size_t alloc_size;
int i = 0;
- bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
- bool fua_bio = (bio->bi_rw & REQ_FUA);
+ bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
+ bool fua_bio = (bio->bi_opf & REQ_FUA);
bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
pb->block = NULL;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d7107d23b897..ac734e5bbe48 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -661,7 +661,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
bio->bi_error = 0;
bio->bi_bdev = pgpath->path.dev->bdev;
- bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+ bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dac55b254a09..bdf1606f67bc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
+ .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
@@ -704,7 +704,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&requeue);
while ((bio = bio_list_pop(writes))) {
- if ((bio->bi_rw & REQ_PREFLUSH) ||
+ if ((bio->bi_opf & REQ_PREFLUSH) ||
(bio_op(bio) == REQ_OP_DISCARD)) {
bio_list_add(&sync, bio);
continue;
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
* If region is not in-sync queue the bio.
*/
if (!r || (r == -EWOULDBLOCK)) {
- if (bio->bi_rw & REQ_RAHEAD)
+ if (bio->bi_opf & REQ_RAHEAD)
return -EWOULDBLOCK;
queue_bio(ms, bio, rw);
@@ -1253,7 +1253,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- if (!(bio->bi_rw & REQ_PREFLUSH) &&
+ if (!(bio->bi_opf & REQ_PREFLUSH) &&
bio_op(bio) != REQ_OP_DISCARD)
dm_rh_dec(ms->rh, bio_record->write_region);
return error;
@@ -1262,7 +1262,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
if (error == -EOPNOTSUPP)
goto out;
- if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
goto out;
if (unlikely(error)) {
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index b11813431f31..85c32b22a420 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -398,7 +398,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
region_t region = dm_rh_bio_to_region(rh, bio);
int recovering = 0;
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
rh->flush_failure = 1;
return;
}
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
+ if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ce2a910709f7..c65feeada864 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
}
@@ -1800,7 +1800,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
if (!dm_bio_get_target_bio_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
@@ -2286,7 +2286,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = o->dev->bdev;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH))
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH))
return DM_MAPIO_REMAPPED;
if (bio_data_dir(bio) != WRITE)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 83f1d4667195..28193a57bf47 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -286,7 +286,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
uint32_t stripe;
unsigned target_bio_nr;
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
@@ -383,7 +383,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
if (!error)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
return error;
if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 197ea2003400..d1c05c12a9db 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
- return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+ return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
dm_thin_changed_this_transaction(tc->td);
}
@@ -870,7 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
@@ -1717,7 +1717,7 @@ static void __remap_and_issue_shared_cell(void *context,
while ((bio = bio_list_pop(&cell->bios))) {
if ((bio_data_dir(bio) == WRITE) ||
- (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
bio_op(bio) == REQ_OP_DISCARD))
bio_list_add(&info->defer_bios, bio);
else {
@@ -2635,7 +2635,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
- if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
bio_op(bio) == REQ_OP_DISCARD) {
thin_defer_bio_with_throttle(tc, bio);
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 618b8752dcf1..b616f11d8473 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -37,7 +37,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
{
switch (bio_op(bio)) {
case REQ_OP_READ:
- if (bio->bi_rw & REQ_RAHEAD) {
+ if (bio->bi_opf & REQ_RAHEAD) {
/* readahead of null bytes only wastes buffer cache */
return -EIO;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dfa09e14e847..fa9b1cb4438a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -798,12 +798,12 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE)
return;
- if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
+ if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_PREFLUSH.
*/
- bio->bi_rw &= ~REQ_PREFLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
@@ -964,7 +964,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
- BUG_ON(bio->bi_rw & REQ_PREFLUSH);
+ BUG_ON(bio->bi_opf & REQ_PREFLUSH);
BUG_ON(bi_size > *tio->len_ptr);
BUG_ON(n_sectors > bi_size);
*tio->len_ptr -= bi_size - n_sectors;
@@ -1252,7 +1252,7 @@ static void __split_and_process_bio(struct mapped_device *md,
start_io_acct(ci.io);
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
@@ -1290,7 +1290,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
- if (!(bio->bi_rw & REQ_RAHEAD))
+ if (!(bio->bi_opf & REQ_RAHEAD))
queue_io(md, bio);
else
bio_io_error(bio);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 70ff888d25d0..86f5d435901d 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split;
sector_t start_sector, end_sector, data_offset;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2c3ab6f5e6be..d646f6e444f0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -285,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
*/
sectors = bio_sectors(bio);
/* bio could be mergeable after passing to underlayer */
- bio->bi_rw &= ~REQ_NOMERGE;
+ bio->bi_opf &= ~REQ_NOMERGE;
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
@@ -414,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
/* an empty barrier - all done */
bio_endio(bio);
else {
- bio->bi_rw &= ~REQ_PREFLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
mddev->pers->make_request(mddev, bio);
}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4974682842ae..673efbd6fc47 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio)
if (!bio->bi_error)
multipath_end_bh_io(mp_bh, 0);
- else if (!(bio->bi_rw & REQ_RAHEAD)) {
+ else if (!(bio->bi_opf & REQ_RAHEAD)) {
/*
* oops, IO error:
*/
@@ -112,7 +112,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -135,7 +135,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+ mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
@@ -360,7 +360,7 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+ bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3d439083212..258986a2699d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
struct md_rdev *tmp_dev;
struct bio *split;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 46168ef2e279..21dc00eb1989 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1043,8 +1043,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
unsigned long flags;
const int op = bio_op(bio);
const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- const unsigned long do_flush_fua = (bio->bi_rw &
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const unsigned long do_flush_fua = (bio->bi_opf &
(REQ_PREFLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@@ -2318,7 +2318,7 @@ read_more:
raid_end_bio_io(r1_bio);
} else {
const unsigned long do_sync
- = r1_bio->master_bio->bi_rw & REQ_SYNC;
+ = r1_bio->master_bio->bi_opf & REQ_SYNC;
if (bio) {
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ed29fc899f06..0e4efcd10795 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1054,8 +1054,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
int i;
const int op = bio_op(bio);
const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@@ -1440,7 +1440,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -2533,7 +2533,7 @@ read_more:
return;
}
- do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+ do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 5504ce2bac06..51f76ddbe265 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
bio_endio(bio);
return 0;
}
- bio->bi_rw &= ~REQ_PREFLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
return -EAGAIN;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d189e894b921..8912407a4dd0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -806,7 +806,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
dd_idx = 0;
while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
dd_idx++;
- if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+ if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
goto unlock_out;
@@ -1003,7 +1003,7 @@ again:
pr_debug("%s: for %llu schedule op %d on disc %d\n",
__func__, (unsigned long long)sh->sector,
- bi->bi_rw, i);
+ bi->bi_opf, i);
atomic_inc(&sh->count);
if (sh != head_sh)
atomic_inc(&head_sh->count);
@@ -1014,7 +1014,7 @@ again:
bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
- bi->bi_rw |= REQ_NOMERGE;
+ bi->bi_opf |= REQ_NOMERGE;
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
@@ -1055,7 +1055,7 @@ again:
pr_debug("%s: for %llu schedule op %d on "
"replacement disc %d\n",
__func__, (unsigned long long)sh->sector,
- rbi->bi_rw, i);
+ rbi->bi_opf, i);
atomic_inc(&sh->count);
if (sh != head_sh)
atomic_inc(&head_sh->count);
@@ -1088,7 +1088,7 @@ again:
if (op_is_write(op))
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %d on disc %d for sector %llu\n",
- bi->bi_rw, i, (unsigned long long)sh->sector);
+ bi->bi_opf, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -1619,9 +1619,9 @@ again:
while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
- if (wbi->bi_rw & REQ_FUA)
+ if (wbi->bi_opf & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
- if (wbi->bi_rw & REQ_SYNC)
+ if (wbi->bi_opf & REQ_SYNC)
set_bit(R5_SyncIO, &dev->flags);
if (bio_op(wbi) == REQ_OP_DISCARD)
set_bit(R5_Discard, &dev->flags);
@@ -5154,7 +5154,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
DEFINE_WAIT(w);
bool do_prepare;
- if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
if (ret == 0)
@@ -5237,7 +5237,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
(unsigned long long)logical_sector);
sh = raid5_get_active_stripe(conf, new_sector, previous,
- (bi->bi_rw & REQ_RAHEAD), 0);
+ (bi->bi_opf & REQ_RAHEAD), 0);
if (sh) {
if (unlikely(previous)) {
/* expansion might have moved on while waiting for a
@@ -5305,7 +5305,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
- (bi->bi_rw & REQ_SYNC) &&
+ (bi->bi_opf & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
release_stripe_plug(mddev, sh);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 7cf3bdfaf809..88e91666f145 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- int op, sector_t sector)
+ bool is_write, sector_t sector)
{
int ret;
- if (!op_is_write(op)) {
+ if (!is_write) {
ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
@@ -1180,7 +1180,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
BUG_ON(len % btt->sector_size);
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
- bio_op(bio), iter.bi_sector);
+ op_is_write(bio_op(bio)), iter.bi_sector);
if (err) {
dev_info(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
@@ -1200,12 +1200,12 @@ out:
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int op)
+ struct page *page, bool is_write)
{
struct btt *btt = bdev->bd_disk->private_data;
- btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
- page_endio(page, op, 0);
+ btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, 0);
return 0;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index d64d92481c1d..571a6c7ee2fc 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, int op,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (!op_is_write(op)) {
+ if (!is_write) {
if (unlikely(bad_pmem))
rc = -EIO;
else {
@@ -128,13 +128,13 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
struct pmem_device *pmem = q->queuedata;
struct nd_region *nd_region = to_region(pmem);
- if (bio->bi_rw & REQ_FLUSH)
+ if (bio->bi_opf & REQ_FLUSH)
nvdimm_flush(nd_region);
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, bio_op(bio),
+ bvec.bv_offset, op_is_write(bio_op(bio)),
iter.bi_sector);
if (rc) {
bio->bi_error = rc;
@@ -144,7 +144,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
if (do_acct)
nd_iostat_end(bio, start);
- if (bio->bi_rw & REQ_FUA)
+ if (bio->bi_opf & REQ_FUA)
nvdimm_flush(nd_region);
bio_endio(bio);
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int op)
+ struct page *page, bool is_write)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
int rc;
- rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
+ rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
/*
* The ->rw_page interface is subtle and tricky. The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
* caused by double completion.
*/
if (rc == 0)
- page_endio(page, op, 0);
+ page_endio(page, is_write, 0);
return rc;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 47cf6c977367..372d744315f3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
- bio->bi_rw = WRITE_FLUSH;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
if (!immed)
bio->bi_private = cmd;
submit_bio(bio);
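This last hunk is one of the two fixes rather than part of the rename: after the bio op/flags split, assigning WRITE_FLUSH to the raw field sets the flush flag bits but, as this series reads, no longer encodes the REQ_OP_WRITE operation itself, which is what bio_set_op_attrs() is for. The before/after, as a sketch of the fix:

    /* Broken after the op split: flags set, operation not encoded. */
    bio->bi_rw = WRITE_FLUSH;

    /* Fixed: encodes REQ_OP_WRITE plus the flush flags into bi_opf. */
    bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);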