author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-27 01:03:07 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-27 01:03:07 +0300
commit     d05d7f40791ccbb6e543cc5dd6a6aa08fc71d635 (patch)
tree       dc0039fe490a41a70de10d58fe8e6136db46463a /drivers/md/raid5.c
parent     75a442efb1ca613f8d1cc71a32c2c9b0aefae4a5 (diff)
parent     17007f3994cdb4643355c73f54f0adad006cf59e (diff)
download   linux-d05d7f40791ccbb6e543cc5dd6a6aa08fc71d635.tar.xz
Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:

 - the big change is the cleanup from Mike Christie, cleaning up our
   uses of command types and modified flags. This is what will throw
   some merge conflicts

 - regression fix for the above for btrfs, from Vincent

 - following up to the above, better packing of struct request from
   Christoph

 - a 2038 fix for blktrace from Arnd

 - a few trivial/spelling fixes from Bart Van Assche

 - a front merge check fix from Damien, which could cause issues on SMR
   drives

 - Atari partition fix from Gabriel

 - convert cfq to highres timers, since jiffies isn't granular enough
   for some devices these days. From Jan and Jeff

 - CFQ priority boost fix idle classes, from me

 - cleanup series from Ming, improving our bio/bvec iteration

 - a direct issue fix for blk-mq from Omar

 - fix for plug merging not involving the IO scheduler, like we do for
   other types of merges. From Tahsin

 - expose DAX type internally and through sysfs. From Toshi and Yigal

* 'for-4.8/core' of git://git.kernel.dk/linux-block: (76 commits)
  block: Fix front merge check
  block: do not merge requests without consulting with io scheduler
  block: Fix spelling in a source code comment
  block: expose QUEUE_FLAG_DAX in sysfs
  block: add QUEUE_FLAG_DAX for devices to advertise their DAX support
  Btrfs: fix comparison in __btrfs_map_block()
  block: atari: Return early for unsupported sector size
  Doc: block: Fix a typo in queue-sysfs.txt
  cfq-iosched: Charge at least 1 jiffie instead of 1 ns
  cfq-iosched: Fix regression in bonnie++ rewrite performance
  cfq-iosched: Convert slice_resid from u64 to s64
  block: Convert fifo_time from ulong to u64
  blktrace: avoid using timespec
  block/blk-cgroup.c: Declare local symbols static
  block/bio-integrity.c: Add #include "blk.h"
  block/partition-generic.c: Remove a set-but-not-used variable
  block: bio: kill BIO_MAX_SIZE
  cfq-iosched: temporarily boost queue priority for idle classes
  block: drbd: avoid to use BIO_MAX_SIZE
  block: bio: remove BIO_MAX_SECTORS
  ...
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c | 48
1 file changed, 24 insertions(+), 24 deletions(-)
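
The raid5 hunks below apply the Mike Christie cleanup described above: the single bi_rw value, which used to mix the data direction with modifier bits such as REQ_DISCARD and REQ_FUA, is split into an operation (REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD) plus separate flags. bio_set_op_attrs() replaces the direct bi_rw assignment, op_is_write() replaces tests of (rw & WRITE), and bio_op() replaces tests of (rw & REQ_DISCARD). The following is only a minimal sketch of that pattern against the v4.8-era block API; submit_one_bio() and its parameters are illustrative and not part of this patch.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>

/* Hypothetical helper (not from the patch): build and submit one bio
 * using the split op/op_flags scheme that this merge introduces. */
static void submit_one_bio(struct block_device *bdev, sector_t sector,
                           struct page *page, bool write, bool fua, bool sync)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int op = write ? REQ_OP_WRITE : REQ_OP_READ;
        int op_flags = 0;

        if (!bio)
                return;
        if (fua)
                op_flags |= REQ_FUA;
        if (sync)
                op_flags |= REQ_SYNC;

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        /* replaces the old "bio->bi_rw = rw" assignment */
        bio_set_op_attrs(bio, op, op_flags);

        /* direction checks now look at the op, not WRITE bits in bi_rw */
        if (op_is_write(op))
                pr_debug("op %d flags 0x%x\n", bio_op(bio), op_flags);

        generic_make_request(bio);
}

In the diff that follows, the same three helpers appear wherever ops_run_io() used to build or test the old rw bitmask.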
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8959e6dd31dd..7aacf5b55e15 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -806,7 +806,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
dd_idx = 0;
while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
dd_idx++;
- if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+ if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+ bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
goto unlock_out;
if (head->batch_head) {
@@ -891,29 +892,28 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (r5l_write_stripe(conf->log, sh) == 0)
return;
for (i = disks; i--; ) {
- int rw;
+ int op, op_flags = 0;
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
sh = head_sh;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+ op = REQ_OP_WRITE;
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
- rw = WRITE_FUA;
- else
- rw = WRITE;
+ op_flags = WRITE_FUA;
if (test_bit(R5_Discard, &sh->dev[i].flags))
- rw |= REQ_DISCARD;
+ op = REQ_OP_DISCARD;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
- rw = READ;
+ op = REQ_OP_READ;
else if (test_and_clear_bit(R5_WantReplace,
&sh->dev[i].flags)) {
- rw = WRITE;
+ op = REQ_OP_WRITE;
replace_only = 1;
} else
continue;
if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
- rw |= REQ_SYNC;
+ op_flags |= REQ_SYNC;
again:
bi = &sh->dev[i].req;
@@ -927,7 +927,7 @@ again:
rdev = rrdev;
rrdev = NULL;
}
- if (rw & WRITE) {
+ if (op_is_write(op)) {
if (replace_only)
rdev = NULL;
if (rdev == rrdev)
@@ -953,7 +953,7 @@ again:
* need to check for writes. We never accept write errors
* on the replacement, so we don't to check rrdev.
*/
- while ((rw & WRITE) && rdev &&
+ while (op_is_write(op) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
int bad_sectors;
@@ -995,13 +995,13 @@ again:
bio_reset(bi);
bi->bi_bdev = rdev->bdev;
- bi->bi_rw = rw;
- bi->bi_end_io = (rw & WRITE)
+ bio_set_op_attrs(bi, op, op_flags);
+ bi->bi_end_io = op_is_write(op)
? raid5_end_write_request
: raid5_end_read_request;
bi->bi_private = sh;
- pr_debug("%s: for %llu schedule op %ld on disc %d\n",
+ pr_debug("%s: for %llu schedule op %d on disc %d\n",
__func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
atomic_inc(&sh->count);
@@ -1027,7 +1027,7 @@ again:
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
bi->bi_vcnt = 0;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -1047,12 +1047,12 @@ again:
bio_reset(rbi);
rbi->bi_bdev = rrdev->bdev;
- rbi->bi_rw = rw;
- BUG_ON(!(rw & WRITE));
+ bio_set_op_attrs(rbi, op, op_flags);
+ BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request;
rbi->bi_private = sh;
- pr_debug("%s: for %llu schedule op %ld on "
+ pr_debug("%s: for %llu schedule op %d on "
"replacement disc %d\n",
__func__, (unsigned long long)sh->sector,
rbi->bi_rw, i);
@@ -1076,7 +1076,7 @@ again:
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
@@ -1085,9 +1085,9 @@ again:
generic_make_request(rbi);
}
if (!rdev && !rrdev) {
- if (rw & WRITE)
+ if (op_is_write(op))
set_bit(STRIPE_DEGRADED, &sh->state);
- pr_debug("skip op %ld on disc %d for sector %llu\n",
+ pr_debug("skip op %d on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -1623,7 +1623,7 @@ again:
set_bit(R5_WantFUA, &dev->flags);
if (wbi->bi_rw & REQ_SYNC)
set_bit(R5_SyncIO, &dev->flags);
- if (wbi->bi_rw & REQ_DISCARD)
+ if (bio_op(wbi) == REQ_OP_DISCARD)
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
@@ -5150,7 +5150,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
DEFINE_WAIT(w);
bool do_prepare;
- if (unlikely(bi->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
if (ret == 0)
@@ -5176,7 +5176,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
return;
}
- if (unlikely(bi->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
make_discard_request(mddev, bi);
return;
}