author    Linus Torvalds <torvalds@linux-foundation.org>  2022-01-12 21:35:23 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-01-12 21:35:23 +0300
commit    c9193f48e94deaeff0c9abbc67b9584e8ddc42ed
tree      fb50432abb9783a3a78a079e2b142bf3206ea1ca  /drivers/md/raid1.c
parent    d3c810803576d867265277df8e94eee386351c9d
parent    d85bd8233fff000567cda4e108112bcb33478616
Merge tag 'for-5.17/drivers-2022-01-11' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:

 - mtip32xx pci cleanups (Bjorn)
 - mtip32xx conversion to generic power management (Vaibhav)
 - rsxx pci power management cleanups (Bjorn)
 - Remove the rsxx driver. This hardware never saw much adoption, and
   it's been end-of-lifed for a while. (Christoph)
 - MD pull request from Song:
     - REQ_NOWAIT support (Vishal Verma)
     - raid6 benchmark optimization (Dirk Müller)
     - Fix for acct bioset (Xiao Ni)
     - Clean up max_queued_requests (Mariusz Tkaczyk)
     - PREEMPT_RT optimization (Davidlohr Bueso)
     - Use default_groups in kobj_type (Greg Kroah-Hartman)
 - Use attribute groups in pktcdvd and rnbd (Greg)
 - NVMe pull request from Christoph:
     - increment request genctr on completion (Keith Busch, Geliang Tang)
     - add an 'iopolicy' module parameter (Hannes Reinecke)
     - print out valid arguments when reading from /dev/nvme-fabrics
       (Hannes Reinecke)
 - Use struct_group() in drbd (Kees)
 - null_blk fixes (Ming)
 - Get rid of congestion logic in pktcdvd (Neil)
 - Floppy ejection hang fix (Tasos)
 - Floppy max user request size fix (Xiongwei)
 - Loop locking fix (Tetsuo)

* tag 'for-5.17/drivers-2022-01-11' of git://git.kernel.dk/linux-block: (32 commits)
  md: use default_groups in kobj_type
  md: Move alloc/free acct bioset in to personality
  lib/raid6: Use strict priority ranking for pq gen() benchmarking
  lib/raid6: skip benchmark of non-chosen xor_syndrome functions
  md: fix spelling of "its"
  md: raid456 add nowait support
  md: raid10 add nowait support
  md: raid1 add nowait support
  md: add support for REQ_NOWAIT
  md: drop queue limitation for RAID1 and RAID10
  md/raid5: play nice with PREEMPT_RT
  block/rnbd-clt-sysfs: use default_groups in kobj_type
  pktcdvd: convert to use attribute groups
  block: null_blk: only set set->nr_maps as 3 if active poll_queues is > 0
  nvme: add 'iopolicy' module parameter
  nvme: drop unused variable ctrl in nvme_setup_cmd
  nvme: increment request genctr on completion
  nvme-fabrics: print out valid arguments when reading from /dev/nvme-fabrics
  block: remove the rsxx driver
  rsxx: Drop PCI legacy power management
  ...
Diffstat (limited to 'drivers/md/raid1.c')
 -rw-r--r--  drivers/md/raid1.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 56 insertions(+), 27 deletions(-)
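As background for the REQ_NOWAIT hunks below: from userspace, the usual way to request nowait semantics is preadv2()/pwritev2() with RWF_NOWAIT, which the block layer maps to REQ_NOWAIT on the direct I/O path; with this series, md/raid1 can fail such a request fast instead of sleeping on a resync barrier. A minimal sketch of a nowait read; the device path /dev/md0 and the 4 KiB buffer size are illustrative assumptions, not part of the patch:

	#define _GNU_SOURCE
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical RAID1 array; adjust to your setup. */
		int fd = open("/dev/md0", O_RDONLY | O_DIRECT);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* O_DIRECT needs an aligned buffer; 4 KiB covers common block sizes. */
		static char buf[4096] __attribute__((aligned(4096)));
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

		/* RWF_NOWAIT: fail with EAGAIN rather than block. With this series,
		 * md honors that even while a resync barrier is raised. */
		ssize_t n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
		if (n < 0 && errno == EAGAIN)
			fprintf(stderr, "would block; retry later or without RWF_NOWAIT\n");
		else if (n < 0)
			perror("preadv2");
		else
			printf("read %zd bytes\n", n);

		close(fd);
		return 0;
	}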
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 85505424f7a4..e2d8acb1e988 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -929,8 +929,10 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
wake_up(&conf->wait_barrier);
}
-static void _wait_barrier(struct r1conf *conf, int idx)
+static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
{
+ bool ret = true;
+
/*
* We need to increase conf->nr_pending[idx] very early here,
* then raise_barrier() can be blocked when it waits for
@@ -961,7 +963,7 @@ static void _wait_barrier(struct r1conf *conf, int idx)
*/
if (!READ_ONCE(conf->array_frozen) &&
!atomic_read(&conf->barrier[idx]))
- return;
+ return ret;
/*
* After holding conf->resync_lock, conf->nr_pending[idx]
@@ -979,18 +981,27 @@ static void _wait_barrier(struct r1conf *conf, int idx)
*/
wake_up(&conf->wait_barrier);
/* Wait for the barrier in same barrier unit bucket to drop. */
- wait_event_lock_irq(conf->wait_barrier,
- !conf->array_frozen &&
- !atomic_read(&conf->barrier[idx]),
- conf->resync_lock);
- atomic_inc(&conf->nr_pending[idx]);
+
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen &&
+ !atomic_read(&conf->barrier[idx]),
+ conf->resync_lock);
+ atomic_inc(&conf->nr_pending[idx]);
+ }
+
atomic_dec(&conf->nr_waiting[idx]);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
-static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
int idx = sector_to_idx(sector_nr);
+ bool ret = true;
/*
* Very similar to _wait_barrier(). The difference is, for read
@@ -1002,7 +1013,7 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
atomic_inc(&conf->nr_pending[idx]);
if (!READ_ONCE(conf->array_frozen))
- return;
+ return ret;
spin_lock_irq(&conf->resync_lock);
atomic_inc(&conf->nr_waiting[idx]);
@@ -1013,19 +1024,27 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
*/
wake_up(&conf->wait_barrier);
/* Wait for array to be unfrozen */
- wait_event_lock_irq(conf->wait_barrier,
- !conf->array_frozen,
- conf->resync_lock);
- atomic_inc(&conf->nr_pending[idx]);
+
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen,
+ conf->resync_lock);
+ atomic_inc(&conf->nr_pending[idx]);
+ }
+
atomic_dec(&conf->nr_waiting[idx]);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
-static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
int idx = sector_to_idx(sector_nr);
- _wait_barrier(conf, idx);
+ return _wait_barrier(conf, idx, nowait);
}
static void _allow_barrier(struct r1conf *conf, int idx)
@@ -1236,7 +1256,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
* Still need barrier for READ in case that whole
* array is frozen.
*/
- wait_read_barrier(conf, bio->bi_iter.bi_sector);
+ if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
+ bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
if (!r1_bio)
r1_bio = alloc_r1bio(mddev, bio);
@@ -1336,6 +1360,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
DEFINE_WAIT(w);
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
@@ -1353,17 +1381,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* thread has put up a bar for new requests.
* Continue immediately if no resync is active currently.
*/
- wait_barrier(conf, bio->bi_iter.bi_sector);
+ if (!wait_barrier(conf, bio->bi_iter.bi_sector,
+ bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
r1_bio = alloc_r1bio(mddev, bio);
r1_bio->sectors = max_write_sectors;
- if (conf->pending_count >= max_queued_requests) {
- md_wakeup_thread(mddev->thread);
- raid1_log(mddev, "wait queued");
- wait_event(conf->wait_barrier,
- conf->pending_count < max_queued_requests);
- }
/* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
@@ -1458,9 +1484,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
allow_barrier(conf, bio->bi_iter.bi_sector);
+
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf, bio->bi_iter.bi_sector);
+ wait_barrier(conf, bio->bi_iter.bi_sector, false);
goto retry_write;
}
@@ -1688,7 +1719,7 @@ static void close_sync(struct r1conf *conf)
int idx;
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
- _wait_barrier(conf, idx);
+ _wait_barrier(conf, idx, false);
_allow_barrier(conf, idx);
}
@@ -3410,5 +3441,3 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");
-
-module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
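A note on motivation, after the full diff: io_uring first tries to issue a request inline with nowait semantics and falls back to its worker threads when the inline attempt returns -EAGAIN. Before this series, an inline read or write to a raid1 array could sleep on the resync barrier, stalling the submitting task. A minimal liburing sketch under those assumptions; the path /dev/md0 and the queue depth are illustrative:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <liburing.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		struct io_uring ring;
		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		int fd = open("/dev/md0", O_RDONLY | O_DIRECT);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		void *buf;
		if (posix_memalign(&buf, 4096, 4096))
			return 1;

		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
		if (!sqe)
			return 1;
		io_uring_prep_read(sqe, fd, buf, 4096, 0);

		/* With md nowait support, the inline issue returns -EAGAIN instead
		 * of sleeping on the barrier, and io_uring retries from a worker;
		 * the submitter no longer blocks here during a resync. */
		io_uring_submit(&ring);

		struct io_uring_cqe *cqe;
		io_uring_wait_cqe(&ring, &cqe);
		printf("read result: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);

		free(buf);
		io_uring_queue_exit(&ring);
		close(fd);
		return 0;
	}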