From 2c4193df379bb89114ff60d4b0fa66131abe6a75 Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Mon, 18 Jul 2011 17:38:43 +1000
Subject: md/raid10: get rid of duplicated conditional expression

Variable 'first' is initialized to zero and updated to @rdev->raid_disk
only if that is non-negative, so 'first' is always >= 0. Thus the
condition '>= first' already implies '>= 0', and the latter check is
not needed.

Signed-off-by: Namhyung Kim
Signed-off-by: NeilBrown
---
 drivers/md/raid10.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6e846688962f..d55ae12b2d8c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1093,8 +1093,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
 
-	if (rdev->saved_raid_disk >= 0 &&
-	    rdev->saved_raid_disk >= first &&
+	if (rdev->saved_raid_disk >= first &&
 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
 		mirror = rdev->saved_raid_disk;
 	else
--
cgit v1.2.3

From 778ca01852e6cc9ff335119b37a1938a978df384 Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Mon, 18 Jul 2011 17:38:47 +1000
Subject: md/raid10: factor out common bio handling code

When a normal-write or sync-read/write bio completes, we need to find
out which disk the bio belongs to. Factor that common code out into a
separate function.

Signed-off-by: Namhyung Kim
Signed-off-by: NeilBrown
---
 drivers/md/raid10.c | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d55ae12b2d8c..e434f1e8d223 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -244,6 +244,23 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
 		r10_bio->devs[slot].addr + (r10_bio->sectors);
 }
 
+/*
+ * Find the disk number which triggered given bio
+ */
+static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, struct bio *bio)
+{
+	int slot;
+
+	for (slot = 0; slot < conf->copies; slot++)
+		if (r10_bio->devs[slot].bio == bio)
+			break;
+
+	BUG_ON(slot == conf->copies);
+	update_head_pos(slot, r10_bio);
+
+	return r10_bio->devs[slot].devnum;
+}
+
 static void raid10_end_read_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -289,13 +306,10 @@ static void raid10_end_write_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r10bio_t *r10_bio = bio->bi_private;
-	int slot, dev;
+	int dev;
 	conf_t *conf = r10_bio->mddev->private;
 
-	for (slot = 0; slot < conf->copies; slot++)
-		if (r10_bio->devs[slot].bio == bio)
-			break;
-	dev = r10_bio->devs[slot].devnum;
+	dev = find_bio_disk(conf, r10_bio, bio);
 
 	/*
 	 * this branch is our 'one mirror IO has finished' event handler:
@@ -316,8 +330,6 @@ static void raid10_end_write_request(struct bio *bio, int error)
 		 */
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
 
-	update_head_pos(slot, r10_bio);
-
 	/*
 	 *
 	 * Let's see if all mirrored write operations have finished
@@ -1173,14 +1185,9 @@ static void end_sync_read(struct bio *bio, int error)
 {
 	r10bio_t *r10_bio = bio->bi_private;
 	conf_t *conf = r10_bio->mddev->private;
-	int i,d;
+	int d;
 
-	for (i=0; i<conf->copies; i++)
-		if (r10_bio->devs[i].bio == bio)
-			break;
-	BUG_ON(i == conf->copies);
-	update_head_pos(i, r10_bio);
-	d = r10_bio->devs[i].devnum;
+	d = find_bio_disk(conf, r10_bio, bio);
 
 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
@@ -1211,18 +1218,13 @@
static void end_sync_write(struct bio *bio, int error) r10bio_t *r10_bio = bio->bi_private; mddev_t *mddev = r10_bio->mddev; conf_t *conf = mddev->private; - int i,d; + int d; - for (i = 0; i < conf->copies; i++) - if (r10_bio->devs[i].bio == bio) - break; - d = r10_bio->devs[i].devnum; + d = find_bio_disk(conf, r10_bio, bio); if (!uptodate) md_error(mddev, conf->mirrors[d].rdev); - update_head_pos(i, r10_bio); - rdev_dec_pending(conf->mirrors[d].rdev, mddev); while (atomic_dec_and_test(&r10_bio->remaining)) { if (r10_bio->master_bio == NULL) { -- cgit v1.2.3 From c65060ad4274f70048d62e0a86332cd3fd23f28d Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 18 Jul 2011 17:38:49 +1000 Subject: md/raid10: share pages between read and write bio's during recovery When performing a recovery, only first 2 slots in r10_bio are in use, for read and write respectively. However all of pages in the write bio are never used and just replaced to read bio's when the read completes. Get rid of those unused pages and share read pages properly. Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/raid10.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e434f1e8d223..3715e220e5e0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -123,7 +123,14 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) for (j = 0 ; j < nalloc; j++) { bio = r10_bio->devs[j].bio; for (i = 0; i < RESYNC_PAGES; i++) { - page = alloc_page(gfp_flags); + if (j == 1 && !test_bit(MD_RECOVERY_SYNC, + &conf->mddev->recovery)) { + /* we can share bv_page's during recovery */ + struct bio *rbio = r10_bio->devs[0].bio; + page = rbio->bi_io_vec[i].bv_page; + get_page(page); + } else + page = alloc_page(gfp_flags); if (unlikely(!page)) goto out_free_pages; @@ -1360,20 +1367,14 @@ done: static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) { conf_t *conf = mddev->private; - int i, d; - struct bio *bio, *wbio; - + int d; + struct bio *wbio; - /* move the pages across to the second bio + /* + * share the pages with the first bio * and submit the write request */ - bio = r10_bio->devs[0].bio; wbio = r10_bio->devs[1].bio; - for (i=0; i < wbio->bi_vcnt; i++) { - struct page *p = bio->bi_io_vec[i].bv_page; - bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page; - wbio->bi_io_vec[i].bv_page = p; - } d = r10_bio->devs[1].devnum; atomic_inc(&conf->mirrors[d].rdev->nr_pending); -- cgit v1.2.3 From 6ce328462c1145a217ba1f27b882743be1407759 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 18 Jul 2011 17:38:50 +1000 Subject: md/raid5: use kmem_cache_zalloc() Replace kmem_cache_alloc + memset(,0,) to kmem_cache_zalloc. I think it's not harmful since @conf->slab_cache already knows actual size of struct stripe_head. 
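
The size point deserves a concrete illustration. Because @conf->slab_cache
was created with the full object size -- sizeof(struct stripe_head) plus the
trailing array of struct r5dev -- a zeroing allocator clears the whole object,
making the hand-rolled memset redundant. A minimal user-space sketch of the
idea, with calloc() standing in for kmem_cache_zalloc() and simplified
stand-in structures (not the kernel definitions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev_stub { int flags; };
struct sh_stub {
	unsigned long state;
	int disks;
	struct dev_stub dev[1];	/* really 'pool_size' entries */
};

int main(void)
{
	int pool_size = 5;
	/* the same size the slab cache would have been created with */
	size_t obj_size = sizeof(struct sh_stub)
			+ (pool_size - 1) * sizeof(struct dev_stub);

	/* old style: allocate, then memset the full object */
	struct sh_stub *a = malloc(obj_size);
	if (!a)
		return 1;
	memset(a, 0, obj_size);

	/* new style: one zeroing allocation of the same size */
	struct sh_stub *b = calloc(1, obj_size);
	if (!b)
		return 1;

	printf("both fully zeroed: %d\n", memcmp(a, b, obj_size) == 0);
	free(a);
	free(b);
	return 0;
}
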
Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/raid5.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b72edf35ec54..0f71aa9a07c5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1315,10 +1315,10 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) static int grow_one_stripe(raid5_conf_t *conf) { struct stripe_head *sh; - sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); + sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); if (!sh) return 0; - memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev)); + sh->raid_conf = conf; spin_lock_init(&sh->lock); #ifdef CONFIG_MULTICORE_RAID456 @@ -1435,12 +1435,10 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) return -ENOMEM; for (i = conf->max_nr_stripes; i; i--) { - nsh = kmem_cache_alloc(sc, GFP_KERNEL); + nsh = kmem_cache_zalloc(sc, GFP_KERNEL); if (!nsh) break; - memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); - nsh->raid_conf = conf; spin_lock_init(&nsh->lock); #ifdef CONFIG_MULTICORE_RAID456 -- cgit v1.2.3 From ffd96e35c16a99fdb490cc5723b8e32135ae5883 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 18 Jul 2011 17:38:51 +1000 Subject: md/raid5: get rid of duplicated call to bio_data_dir() In raid5::make_request(), once bio_data_dir(@bi) is detected it never (and couldn't) be changed. Use the result always. Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/raid5.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0f71aa9a07c5..71480646a459 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4014,7 +4014,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) } } - if (bio_data_dir(bi) == WRITE && + if (rw == WRITE && logical_sector >= mddev->suspend_lo && logical_sector < mddev->suspend_hi) { release_stripe(sh); @@ -4032,7 +4032,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) } if (test_bit(STRIPE_EXPANDING, &sh->state) || - !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { + !add_stripe_bio(sh, bi, dd_idx, rw)) { /* Stripe is busy expanding or * add failed due to overlap. Flush everything * and wait a while -- cgit v1.2.3 From 83206d66b65118d995c38746f21edc2bb8564b49 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 26 Jul 2011 11:19:49 +1000 Subject: md/raid5: Remove use of sh->lock in sync_request This is the start of a series of patches to remove sh->lock. sync_request takes sh->lock before setting STRIPE_SYNCING to ensure there is no race with testing it in handle_stripe[56]. Instead, use a new flag STRIPE_SYNC_REQUESTED and test it early in handle_stripe[56] (after getting the same lock) and perform the same set/clear operations if it was set. 
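
To make the handoff concrete: the requester only sets a request bit, and the
handler -- already serialized per stripe -- converts it into the
SYNCING/INSYNC transitions that previously happened under sh->lock. Below is
a user-space model with C11 atomics standing in for the kernel's set_bit()
and test_and_clear_bit(); it is illustrative only, not the kernel code.

#include <stdatomic.h>
#include <stdio.h>

enum { SYNC_REQUESTED = 1 << 0, SYNCING = 1 << 1, INSYNC = 1 << 2 };

static _Atomic unsigned int state;

static void sync_request(void)		/* requester side */
{
	atomic_fetch_or(&state, SYNC_REQUESTED);
}

static void handle_stripe(void)		/* handler side, serialized */
{
	unsigned int old = atomic_fetch_and(&state, ~SYNC_REQUESTED);

	if (old & SYNC_REQUESTED) {
		/* the set/clear that used to run under sh->lock */
		atomic_fetch_or(&state, SYNCING);
		atomic_fetch_and(&state, ~INSYNC);
	}
	/* ... rest of stripe handling ... */
}

int main(void)
{
	atomic_fetch_or(&state, INSYNC);
	sync_request();
	handle_stripe();
	printf("state=%#x (SYNCING set, INSYNC clear)\n",
	       atomic_load(&state));
	return 0;
}
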
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 13 +++++++++---- drivers/md/raid5.h | 33 ++++++++++++++++++--------------- 2 files changed, 27 insertions(+), 19 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 71480646a459..f2f2ab329690 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3022,6 +3022,10 @@ static void handle_stripe5(struct stripe_head *sh) sh->reconstruct_state); spin_lock(&sh->lock); + if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + set_bit(STRIPE_SYNCING, &sh->state); + clear_bit(STRIPE_INSYNC, &sh->state); + } clear_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); @@ -3313,6 +3317,10 @@ static void handle_stripe6(struct stripe_head *sh) memset(&s, 0, sizeof(s)); spin_lock(&sh->lock); + if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + set_bit(STRIPE_SYNCING, &sh->state); + clear_bit(STRIPE_INSYNC, &sh->state); + } clear_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); @@ -4373,10 +4381,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); - spin_lock(&sh->lock); - set_bit(STRIPE_SYNCING, &sh->state); - clear_bit(STRIPE_INSYNC, &sh->state); - spin_unlock(&sh->lock); + set_bit(STRIPE_SYNC_REQUESTED, &sh->state); handle_stripe(sh); release_stripe(sh); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 3ca77a2613ba..a33001137bf8 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -289,21 +289,24 @@ struct r6_state { /* * Stripe state */ -#define STRIPE_HANDLE 2 -#define STRIPE_SYNCING 3 -#define STRIPE_INSYNC 4 -#define STRIPE_PREREAD_ACTIVE 5 -#define STRIPE_DELAYED 6 -#define STRIPE_DEGRADED 7 -#define STRIPE_BIT_DELAY 8 -#define STRIPE_EXPANDING 9 -#define STRIPE_EXPAND_SOURCE 10 -#define STRIPE_EXPAND_READY 11 -#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */ -#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ -#define STRIPE_BIOFILL_RUN 14 -#define STRIPE_COMPUTE_RUN 15 -#define STRIPE_OPS_REQ_PENDING 16 +enum { + STRIPE_HANDLE, + STRIPE_SYNC_REQUESTED, + STRIPE_SYNCING, + STRIPE_INSYNC, + STRIPE_PREREAD_ACTIVE, + STRIPE_DELAYED, + STRIPE_DEGRADED, + STRIPE_BIT_DELAY, + STRIPE_EXPANDING, + STRIPE_EXPAND_SOURCE, + STRIPE_EXPAND_READY, + STRIPE_IO_STARTED, /* do not count towards 'bypass_count' */ + STRIPE_FULL_WRITE, /* all blocks are set to be overwritten */ + STRIPE_BIOFILL_RUN, + STRIPE_COMPUTE_RUN, + STRIPE_OPS_REQ_PENDING, +}; /* * Operation request flags -- cgit v1.2.3 From cbe47ec559c33a68b5ee002051b848d1531a8adb Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 26 Jul 2011 11:20:35 +1000 Subject: md/raid5: Protect some more code with ->device_lock. Other places that change or follow dev->towrite and dev->written take the device_lock as well as the sh->lock. So it should really be held in these places too. Also, doing so will allow sh->lock to be discarded. 
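
The rule being established is that every reader and writer of dev->towrite
and dev->written goes through the one device_lock, which is what later lets
sh->lock disappear. A stand-alone sketch of that handoff pattern, with a
pthread mutex standing in for the spinlock and simplified stand-in
structures (not the kernel's):

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct bio_stub { int id; struct bio_stub *next; };

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static struct bio_stub *towrite;	/* queued writes */
static struct bio_stub *written;	/* writes handed to the drain */

static void add_write(struct bio_stub *b)	/* add_stripe_bio() side */
{
	pthread_mutex_lock(&device_lock);
	b->next = towrite;
	towrite = b;
	pthread_mutex_unlock(&device_lock);
}

static struct bio_stub *drain_writes(void)	/* ops_run_biodrain() side */
{
	pthread_mutex_lock(&device_lock);
	struct bio_stub *chosen = towrite;
	towrite = NULL;
	written = chosen;		/* the handoff happens atomically */
	pthread_mutex_unlock(&device_lock);
	return chosen;
}

int main(void)
{
	struct bio_stub b = { .id = 1, .next = NULL };
	add_write(&b);
	printf("drained bio %d\n", drain_writes()->id);
	return 0;
}
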
with merged fixes by: Namhyung Kim Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f2f2ab329690..9985138f4c04 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1021,10 +1021,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) struct bio *wbi; spin_lock(&sh->lock); + spin_lock_irq(&sh->raid_conf->device_lock); chosen = dev->towrite; dev->towrite = NULL; BUG_ON(dev->written); wbi = dev->written = chosen; + spin_unlock_irq(&sh->raid_conf->device_lock); spin_unlock(&sh->lock); while (wbi && wbi->bi_sector < @@ -2141,7 +2143,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in raid5_conf_t *conf = sh->raid_conf; int firstwrite=0; - pr_debug("adding bh b#%llu to stripe s#%llu\n", + pr_debug("adding bi b#%llu to stripe s#%llu\n", (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector); @@ -2167,19 +2169,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in bi->bi_next = *bip; *bip = bi; bi->bi_phys_segments++; - spin_unlock_irq(&conf->device_lock); - spin_unlock(&sh->lock); - - pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", - (unsigned long long)bi->bi_sector, - (unsigned long long)sh->sector, dd_idx); - - if (conf->mddev->bitmap && firstwrite) { - bitmap_startwrite(conf->mddev->bitmap, sh->sector, - STRIPE_SECTORS, 0); - sh->bm_seq = conf->seq_flush+1; - set_bit(STRIPE_BIT_DELAY, &sh->state); - } if (forwrite) { /* check if page is covered */ @@ -2194,6 +2183,19 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); } + spin_unlock_irq(&conf->device_lock); + spin_unlock(&sh->lock); + + pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", + (unsigned long long)(*bip)->bi_sector, + (unsigned long long)sh->sector, dd_idx); + + if (conf->mddev->bitmap && firstwrite) { + bitmap_startwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, 0); + sh->bm_seq = conf->seq_flush+1; + set_bit(STRIPE_BIT_DELAY, &sh->state); + } return 1; overlap: -- cgit v1.2.3 From c4c1663be46b2ab94e59d3e0c583a8f6b188ff0c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 26 Jul 2011 11:34:20 +1000 Subject: md/raid5: replace sh->lock with an 'active' flag. sh->lock is now mainly used to ensure that two threads aren't running in the locked part of handle_stripe[56] at the same time. That can more neatly be achieved with an 'active' flag which we set while running handle_stripe. If we find the flag is set, we simply requeue the stripe for later by setting STRIPE_HANDLE. For safety we take ->device_lock while examining the state of the stripe and creating a summary in 'stripe_head_state / r6_state'. This possibly isn't needed but as shared fields like ->toread, ->towrite are checked it is safer for now at least. We leave the label after the old 'unlock' called "unlock" because it will disappear in a few patches, so renaming seems pointless. This leaves the stripe 'locked' for longer as we clear STRIPE_ACTIVE later, but that is not a problem. 
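
The mechanism is easiest to see in miniature: whoever wins the test-and-set
owns the stripe, and a loser simply re-marks it for handling and leaves, so
no thread ever blocks on a stripe lock. A user-space model with C11 atomics
standing in for the kernel bitops (illustrative, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

enum { ACTIVE = 1 << 0, HANDLE = 1 << 1 };

static _Atomic unsigned int state;

static void handle_stripe(int caller)
{
	atomic_fetch_and(&state, ~HANDLE);
	if (atomic_fetch_or(&state, ACTIVE) & ACTIVE) {
		/* already being handled: queue another pass and bail */
		atomic_fetch_or(&state, HANDLE);
		printf("caller %d: busy, requeued\n", caller);
		return;
	}
	printf("caller %d: handling stripe\n", caller);
	/* ... handle_stripe5()/handle_stripe6() work happens here ... */
	atomic_fetch_and(&state, ~ACTIVE);
}

int main(void)
{
	atomic_fetch_or(&state, ACTIVE);	/* pretend another thread owns it */
	handle_stripe(1);			/* loses the race: requeued */
	atomic_fetch_and(&state, ~ACTIVE);	/* the owner finishes */
	handle_stripe(2);			/* now wins and does the work */
	return 0;
}
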
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 26 +++++++++++++------------- drivers/md/raid5.h | 35 ++++++++++++++++------------------- 2 files changed, 29 insertions(+), 32 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9985138f4c04..f8275b5a6fbe 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1020,14 +1020,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { struct bio *wbi; - spin_lock(&sh->lock); spin_lock_irq(&sh->raid_conf->device_lock); chosen = dev->towrite; dev->towrite = NULL; BUG_ON(dev->written); wbi = dev->written = chosen; spin_unlock_irq(&sh->raid_conf->device_lock); - spin_unlock(&sh->lock); while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { @@ -1322,7 +1320,6 @@ static int grow_one_stripe(raid5_conf_t *conf) return 0; sh->raid_conf = conf; - spin_lock_init(&sh->lock); #ifdef CONFIG_MULTICORE_RAID456 init_waitqueue_head(&sh->ops.wait_for_ops); #endif @@ -1442,7 +1439,6 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) break; nsh->raid_conf = conf; - spin_lock_init(&nsh->lock); #ifdef CONFIG_MULTICORE_RAID456 init_waitqueue_head(&nsh->ops.wait_for_ops); #endif @@ -2148,7 +2144,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in (unsigned long long)sh->sector); - spin_lock(&sh->lock); spin_lock_irq(&conf->device_lock); if (forwrite) { bip = &sh->dev[dd_idx].towrite; @@ -2184,7 +2179,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); } spin_unlock_irq(&conf->device_lock); - spin_unlock(&sh->lock); pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)(*bip)->bi_sector, @@ -2201,7 +2195,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in overlap: set_bit(R5_Overlap, &sh->dev[dd_idx].flags); spin_unlock_irq(&conf->device_lock); - spin_unlock(&sh->lock); return 0; } @@ -3023,12 +3016,10 @@ static void handle_stripe5(struct stripe_head *sh) atomic_read(&sh->count), sh->pd_idx, sh->check_state, sh->reconstruct_state); - spin_lock(&sh->lock); if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { set_bit(STRIPE_SYNCING, &sh->state); clear_bit(STRIPE_INSYNC, &sh->state); } - clear_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); s.syncing = test_bit(STRIPE_SYNCING, &sh->state); @@ -3037,6 +3028,7 @@ static void handle_stripe5(struct stripe_head *sh) /* Now to look around and see what can be done */ rcu_read_lock(); + spin_lock_irq(&conf->device_lock); for (i=disks; i--; ) { mdk_rdev_t *rdev; @@ -3099,6 +3091,7 @@ static void handle_stripe5(struct stripe_head *sh) s.failed_num = i; } } + spin_unlock_irq(&conf->device_lock); rcu_read_unlock(); if (unlikely(blocked_rdev)) { @@ -3275,7 +3268,6 @@ static void handle_stripe5(struct stripe_head *sh) handle_stripe_expansion(conf, sh, NULL); unlock: - spin_unlock(&sh->lock); /* wait for this device to become unblocked */ if (unlikely(blocked_rdev)) @@ -3318,12 +3310,10 @@ static void handle_stripe6(struct stripe_head *sh) sh->check_state, sh->reconstruct_state); memset(&s, 0, sizeof(s)); - spin_lock(&sh->lock); if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { set_bit(STRIPE_SYNCING, &sh->state); clear_bit(STRIPE_INSYNC, &sh->state); } - clear_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); s.syncing = 
test_bit(STRIPE_SYNCING, &sh->state); @@ -3332,6 +3322,7 @@ static void handle_stripe6(struct stripe_head *sh) /* Now to look around and see what can be done */ rcu_read_lock(); + spin_lock_irq(&conf->device_lock); for (i=disks; i--; ) { mdk_rdev_t *rdev; dev = &sh->dev[i]; @@ -3395,6 +3386,7 @@ static void handle_stripe6(struct stripe_head *sh) s.failed++; } } + spin_unlock_irq(&conf->device_lock); rcu_read_unlock(); if (unlikely(blocked_rdev)) { @@ -3580,7 +3572,6 @@ static void handle_stripe6(struct stripe_head *sh) handle_stripe_expansion(conf, sh, &r6s); unlock: - spin_unlock(&sh->lock); /* wait for this device to become unblocked */ if (unlikely(blocked_rdev)) @@ -3608,10 +3599,19 @@ static void handle_stripe6(struct stripe_head *sh) static void handle_stripe(struct stripe_head *sh) { + clear_bit(STRIPE_HANDLE, &sh->state); + if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { + /* already being handled, ensure it gets handled + * again when current action finishes */ + set_bit(STRIPE_HANDLE, &sh->state); + return; + } + if (sh->raid_conf->level == 6) handle_stripe6(sh); else handle_stripe5(sh); + clear_bit(STRIPE_ACTIVE, &sh->state); } static void raid5_activate_delayed(raid5_conf_t *conf) diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index a33001137bf8..bb246d9e0547 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -6,11 +6,11 @@ /* * - * Each stripe contains one buffer per disc. Each buffer can be in + * Each stripe contains one buffer per device. Each buffer can be in * one of a number of states stored in "flags". Changes between - * these states happen *almost* exclusively under a per-stripe - * spinlock. Some very specific changes can happen in bi_end_io, and - * these are not protected by the spin lock. + * these states happen *almost* exclusively under the protection of the + * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and + * these are not protected by STRIPE_ACTIVE. * * The flag bits that are used to represent these states are: * R5_UPTODATE and R5_LOCKED @@ -76,12 +76,10 @@ * block and the cached buffer are successfully written, any buffer on * a written list can be returned with b_end_io. * - * The write list and read list both act as fifos. The read list is - * protected by the device_lock. The write and written lists are - * protected by the stripe lock. The device_lock, which can be - * claimed while the stipe lock is held, is only for list - * manipulations and will only be held for a very short time. It can - * be claimed from interrupts. + * The write list and read list both act as fifos. The read list, + * write list and written list are protected by the device_lock. + * The device_lock is only for list manipulations and will only be + * held for a very short time. It can be claimed from interrupts. * * * Stripes in the stripe cache can be on one of two lists (or on @@ -96,7 +94,6 @@ * * The inactive_list, handle_list and hash bucket lists are all protected by the * device_lock. - * - stripes on the inactive_list never have their stripe_lock held. * - stripes have a reference counter. If count==0, they are on a list. * - If a stripe might need handling, STRIPE_HANDLE is set. * - When refcount reaches zero, then if STRIPE_HANDLE it is put on @@ -116,10 +113,10 @@ * attach a request to an active stripe (add_stripe_bh()) * lockdev attach-buffer unlockdev * handle a stripe (handle_stripe()) - * lockstripe clrSTRIPE_HANDLE ... + * setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ... * (lockdev check-buffers unlockdev) .. * change-state .. 
- * record io/ops needed unlockstripe schedule io/ops + * record io/ops needed clearSTRIPE_ACTIVE schedule io/ops * release an active stripe (release_stripe()) * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev * @@ -128,8 +125,7 @@ * on a cached buffer, and plus one if the stripe is undergoing stripe * operations. * - * Stripe operations are performed outside the stripe lock, - * the stripe operations are: + * The stripe operations are: * -copying data between the stripe cache and user application buffers * -computing blocks to save a disk access, or to recover a missing block * -updating the parity on a write operation (reconstruct write and @@ -159,7 +155,8 @@ */ /* - * Operations state - intermediate states that are visible outside of sh->lock + * Operations state - intermediate states that are visible outside of + * STRIPE_ACTIVE. * In general _idle indicates nothing is running, _run indicates a data * processing operation is active, and _result means the data processing result * is stable and can be acted upon. For simple operations like biofill and @@ -209,7 +206,6 @@ struct stripe_head { short ddf_layout;/* use DDF ordering to calculate Q */ unsigned long state; /* state flags */ atomic_t count; /* nr of active thread/requests */ - spinlock_t lock; int bm_seq; /* sequence number for bitmap flushes */ int disks; /* disks in stripe */ enum check_states check_state; @@ -240,7 +236,7 @@ struct stripe_head { }; /* stripe_head_state - collects and tracks the dynamic state of a stripe_head - * for handle_stripe. It is only valid under spin_lock(sh->lock); + * for handle_stripe. */ struct stripe_head_state { int syncing, expanding, expanded; @@ -290,6 +286,7 @@ struct r6_state { * Stripe state */ enum { + STRIPE_ACTIVE, STRIPE_HANDLE, STRIPE_SYNC_REQUESTED, STRIPE_SYNCING, @@ -339,7 +336,7 @@ enum { * PREREAD_ACTIVE. * In stripe_handle, if we find pre-reading is necessary, we do it if * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue. - * HANDLE gets cleared if stripe_handle leave nothing locked. + * HANDLE gets cleared if stripe_handle leaves nothing locked. */ -- cgit v1.2.3 From 82e5a1718b9d0401b826341b9023766d04cb82f2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 26 Jul 2011 11:35:15 +1000 Subject: md/raid5: move common code into handle_stripe There is common code at the start of handle_stripe5 and handle_stripe6. Move it into handle_stripe. 
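
Reduced to a toy, the shape of this refactoring looks like the following:
work common to both variants moves into the dispatcher, and the
level-specific helpers keep only what differs. None of these names are the
kernel's; this is a sketch of the pattern only.

#include <stdio.h>

static void handle_level5(int *state) { printf("raid5 path, state=%d\n", *state); }
static void handle_level6(int *state) { printf("raid6 path, state=%d\n", *state); }

static void handle(int level, int *state)
{
	/* common preamble formerly duplicated in both helpers */
	*state = 0;

	if (level == 6)
		handle_level6(state);
	else
		handle_level5(state);
}

int main(void)
{
	int state = -1;
	handle(5, &state);
	handle(6, &state);
	return 0;
}
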
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f8275b5a6fbe..dfb3d9f80a3c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3016,12 +3016,6 @@ static void handle_stripe5(struct stripe_head *sh) atomic_read(&sh->count), sh->pd_idx, sh->check_state, sh->reconstruct_state); - if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { - set_bit(STRIPE_SYNCING, &sh->state); - clear_bit(STRIPE_INSYNC, &sh->state); - } - clear_bit(STRIPE_DELAYED, &sh->state); - s.syncing = test_bit(STRIPE_SYNCING, &sh->state); s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); @@ -3310,12 +3304,6 @@ static void handle_stripe6(struct stripe_head *sh) sh->check_state, sh->reconstruct_state); memset(&s, 0, sizeof(s)); - if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { - set_bit(STRIPE_SYNCING, &sh->state); - clear_bit(STRIPE_INSYNC, &sh->state); - } - clear_bit(STRIPE_DELAYED, &sh->state); - s.syncing = test_bit(STRIPE_SYNCING, &sh->state); s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); @@ -3607,6 +3595,12 @@ static void handle_stripe(struct stripe_head *sh) return; } + if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + set_bit(STRIPE_SYNCING, &sh->state); + clear_bit(STRIPE_INSYNC, &sh->state); + } + clear_bit(STRIPE_DELAYED, &sh->state); + if (sh->raid_conf->level == 6) handle_stripe6(sh); else -- cgit v1.2.3 From f2b3b44deee1524ca4f006048e0569f47eefdb74 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 26 Jul 2011 11:35:19 +1000 Subject: md/raid5: unify stripe_head_state and r6_state 'struct stripe_head_state' stores state about the 'current' stripe that is passed around while handling the stripe. For RAID6 there is an extension structure: r6_state, which is also passed around. There is no value in keeping these separate, so move the fields from the latter into the former. This means that all code now needs to treat s->failed_num as an small array, but this is a small cost. Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 77 +++++++++++++++++++++++++++--------------------------- drivers/md/raid5.h | 8 ++---- 2 files changed, 40 insertions(+), 45 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index dfb3d9f80a3c..bbc7792f013c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2318,7 +2318,7 @@ static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, int disk_idx, int disks) { struct r5dev *dev = &sh->dev[disk_idx]; - struct r5dev *failed_dev = &sh->dev[s->failed_num]; + struct r5dev *failed_dev = &sh->dev[s->failed_num[0]]; /* is the data in this block needed, and can we get it? 
*/ if (!test_bit(R5_LOCKED, &dev->flags) && @@ -2334,7 +2334,7 @@ static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, * otherwise read it if the backing disk is insync */ if ((s->uptodate == disks - 1) && - (s->failed && disk_idx == s->failed_num)) { + (s->failed && disk_idx == s->failed_num[0])) { set_bit(STRIPE_COMPUTE_RUN, &sh->state); set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); set_bit(R5_Wantcompute, &dev->flags); @@ -2388,11 +2388,11 @@ static void handle_stripe_fill5(struct stripe_head *sh, * 0 to tell the loop in handle_stripe_fill6 to continue */ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, - struct r6_state *r6s, int disk_idx, int disks) + int disk_idx, int disks) { struct r5dev *dev = &sh->dev[disk_idx]; - struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]], - &sh->dev[r6s->failed_num[1]] }; + struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], + &sh->dev[s->failed_num[1]] }; if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && @@ -2409,8 +2409,8 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); BUG_ON(test_bit(R5_Wantread, &dev->flags)); if ((s->uptodate == disks - 1) && - (s->failed && (disk_idx == r6s->failed_num[0] || - disk_idx == r6s->failed_num[1]))) { + (s->failed && (disk_idx == s->failed_num[0] || + disk_idx == s->failed_num[1]))) { /* have disk failed, and we're requested to fetch it; * do compute it */ @@ -2465,7 +2465,7 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, * handle_stripe_fill6 - read or compute data to satisfy pending requests. */ static void handle_stripe_fill6(struct stripe_head *sh, - struct stripe_head_state *s, struct r6_state *r6s, + struct stripe_head_state *s, int disks) { int i; @@ -2477,7 +2477,7 @@ static void handle_stripe_fill6(struct stripe_head *sh, if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && !sh->reconstruct_state) for (i = disks; i--; ) - if (fetch_block6(sh, s, r6s, i, disks)) + if (fetch_block6(sh, s, i, disks)) break; set_bit(STRIPE_HANDLE, &sh->state); } @@ -2625,7 +2625,7 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf, static void handle_stripe_dirtying6(raid5_conf_t *conf, struct stripe_head *sh, struct stripe_head_state *s, - struct r6_state *r6s, int disks) + int disks) { int rcw = 0, pd_idx = sh->pd_idx, i; int qd_idx = sh->qd_idx; @@ -2688,7 +2688,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, s->uptodate--; break; } - dev = &sh->dev[s->failed_num]; + dev = &sh->dev[s->failed_num[0]]; /* fall through */ case check_state_compute_result: sh->check_state = check_state_idle; @@ -2760,7 +2760,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, struct stripe_head_state *s, - struct r6_state *r6s, int disks) + int disks) { int pd_idx = sh->pd_idx; int qd_idx = sh->qd_idx; @@ -2779,14 +2779,14 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, switch (sh->check_state) { case check_state_idle: /* start a new check operation if there are < 2 failures */ - if (s->failed == r6s->q_failed) { + if (s->failed == s->q_failed) { /* The only possible failed device holds Q, so it * makes sense to check P (If anything else were failed, * we would have used P to recreate it). 
*/ sh->check_state = check_state_run; } - if (!r6s->q_failed && s->failed < 2) { + if (!s->q_failed && s->failed < 2) { /* Q is not failed, and we didn't use it to generate * anything, so it makes sense to check it */ @@ -2828,13 +2828,13 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, */ BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ if (s->failed == 2) { - dev = &sh->dev[r6s->failed_num[1]]; + dev = &sh->dev[s->failed_num[1]]; s->locked++; set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); } if (s->failed >= 1) { - dev = &sh->dev[r6s->failed_num[0]]; + dev = &sh->dev[s->failed_num[0]]; s->locked++; set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); @@ -2922,7 +2922,7 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, } static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, - struct r6_state *r6s) + struct stripe_head_state *r6s) { int i; @@ -3082,7 +3082,7 @@ static void handle_stripe5(struct stripe_head *sh) clear_bit(R5_Insync, &dev->flags); if (!test_bit(R5_Insync, &dev->flags)) { s.failed++; - s.failed_num = i; + s.failed_num[0] = i; } } spin_unlock_irq(&conf->device_lock); @@ -3107,7 +3107,7 @@ static void handle_stripe5(struct stripe_head *sh) pr_debug("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d\n", s.locked, s.uptodate, s.to_read, s.to_write, - s.failed, s.failed_num); + s.failed, s.failed_num[0]); /* check if the array has lost two devices and, if so, some requests might * need to be failed */ @@ -3127,7 +3127,7 @@ static void handle_stripe5(struct stripe_head *sh) ((test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) && test_bit(R5_UPTODATE, &dev->flags)) || - (s.failed == 1 && s.failed_num == sh->pd_idx))) + (s.failed == 1 && s.failed_num[0] == sh->pd_idx))) handle_stripe_clean_event(conf, sh, disks, &return_bi); /* Now we might consider reading some blocks, either to check/generate @@ -3198,11 +3198,11 @@ static void handle_stripe5(struct stripe_head *sh) * the repair/check process */ if (s.failed == 1 && !conf->mddev->ro && - test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) - && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) - && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) + test_bit(R5_ReadError, &sh->dev[s.failed_num[0]].flags) + && !test_bit(R5_LOCKED, &sh->dev[s.failed_num[0]].flags) + && test_bit(R5_UPTODATE, &sh->dev[s.failed_num[0]].flags) ) { - dev = &sh->dev[s.failed_num]; + dev = &sh->dev[s.failed_num[0]]; if (!test_bit(R5_ReWrite, &dev->flags)) { set_bit(R5_Wantwrite, &dev->flags); set_bit(R5_ReWrite, &dev->flags); @@ -3292,7 +3292,6 @@ static void handle_stripe6(struct stripe_head *sh) struct bio *return_bi = NULL; int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; struct stripe_head_state s; - struct r6_state r6s; struct r5dev *dev, *pdev, *qdev; mdk_rdev_t *blocked_rdev = NULL; int dec_preread_active = 0; @@ -3370,7 +3369,7 @@ static void handle_stripe6(struct stripe_head *sh) clear_bit(R5_Insync, &dev->flags); if (!test_bit(R5_Insync, &dev->flags)) { if (s.failed < 2) - r6s.failed_num[s.failed] = i; + s.failed_num[s.failed] = i; s.failed++; } } @@ -3396,7 +3395,7 @@ static void handle_stripe6(struct stripe_head *sh) pr_debug("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d,%d\n", s.locked, s.uptodate, s.to_read, s.to_write, s.failed, - r6s.failed_num[0], r6s.failed_num[1]); + s.failed_num[0], s.failed_num[1]); /* check if the array has 
lost >2 devices and, if so, some requests * might need to be failed */ @@ -3413,17 +3412,17 @@ static void handle_stripe6(struct stripe_head *sh) * are safe, or on a failed drive */ pdev = &sh->dev[pd_idx]; - r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) - || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); + s.p_failed = (s.failed >= 1 && s.failed_num[0] == pd_idx) + || (s.failed >= 2 && s.failed_num[1] == pd_idx); qdev = &sh->dev[qd_idx]; - r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx) - || (s.failed >= 2 && r6s.failed_num[1] == qd_idx); + s.q_failed = (s.failed >= 1 && s.failed_num[0] == qd_idx) + || (s.failed >= 2 && s.failed_num[1] == qd_idx); - if ( s.written && - ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) + if (s.written && + (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) && !test_bit(R5_LOCKED, &pdev->flags) && test_bit(R5_UPTODATE, &pdev->flags)))) && - ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) + (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) && !test_bit(R5_LOCKED, &qdev->flags) && test_bit(R5_UPTODATE, &qdev->flags))))) handle_stripe_clean_event(conf, sh, disks, &return_bi); @@ -3434,7 +3433,7 @@ static void handle_stripe6(struct stripe_head *sh) */ if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) - handle_stripe_fill6(sh, &s, &r6s, disks); + handle_stripe_fill6(sh, &s, disks); /* Now we check to see if any write operations have recently * completed @@ -3472,7 +3471,7 @@ static void handle_stripe6(struct stripe_head *sh) * block. */ if (s.to_write && !sh->reconstruct_state && !sh->check_state) - handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); + handle_stripe_dirtying6(conf, sh, &s, disks); /* maybe we need to check and possibly fix the parity for this stripe * Any reads will already have been scheduled, so we just see if enough @@ -3483,7 +3482,7 @@ static void handle_stripe6(struct stripe_head *sh) (s.syncing && s.locked == 0 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !test_bit(STRIPE_INSYNC, &sh->state))) - handle_parity_checks6(conf, sh, &s, &r6s, disks); + handle_parity_checks6(conf, sh, &s, disks); if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS,1); @@ -3495,7 +3494,7 @@ static void handle_stripe6(struct stripe_head *sh) */ if (s.failed <= 2 && !conf->mddev->ro) for (i = 0; i < s.failed; i++) { - dev = &sh->dev[r6s.failed_num[i]]; + dev = &sh->dev[s.failed_num[i]]; if (test_bit(R5_ReadError, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) && test_bit(R5_UPTODATE, &dev->flags) @@ -3557,7 +3556,7 @@ static void handle_stripe6(struct stripe_head *sh) if (s.expanding && s.locked == 0 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) - handle_stripe_expansion(conf, sh, &r6s); + handle_stripe_expansion(conf, sh, &s); unlock: diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index bb246d9e0547..05ac5cde3707 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -242,13 +242,9 @@ struct stripe_head_state { int syncing, expanding, expanded; int locked, uptodate, to_read, to_write, failed, written; int to_fill, compute, req_compute, non_overwrite; - int failed_num; + int failed_num[2]; unsigned long ops_request; -}; - -/* r6_state - extra state data only relevant to r6 */ -struct r6_state { - int p_failed, q_failed, failed_num[2]; + int p_failed, q_failed; }; /* Flags */ -- cgit v1.2.3 From c5709ef6a094c72b56355590bfa55cc107e98376 Mon Sep 17 00:00:00 2001 From: 
NeilBrown Date: Tue, 26 Jul 2011 11:35:20 +1000 Subject: md/raid5: add some more fields to stripe_head_state Adding these three fields will allow more common code to be moved to handle_stripe() struct field rearrangement by Namhyung Kim. Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 54 ++++++++++++++++++++++++------------------------------ drivers/md/raid5.h | 6 +++++- 2 files changed, 29 insertions(+), 31 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index bbc7792f013c..bc15f48be78d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3003,12 +3003,9 @@ static void handle_stripe5(struct stripe_head *sh) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks, i; - struct bio *return_bi = NULL; struct stripe_head_state s; struct r5dev *dev; - mdk_rdev_t *blocked_rdev = NULL; int prexor; - int dec_preread_active = 0; memset(&s, 0, sizeof(s)); pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " @@ -3058,9 +3055,9 @@ static void handle_stripe5(struct stripe_head *sh) if (dev->written) s.written++; rdev = rcu_dereference(conf->disks[i].rdev); - if (blocked_rdev == NULL && + if (s.blocked_rdev == NULL && rdev && unlikely(test_bit(Blocked, &rdev->flags))) { - blocked_rdev = rdev; + s.blocked_rdev = rdev; atomic_inc(&rdev->nr_pending); } clear_bit(R5_Insync, &dev->flags); @@ -3088,15 +3085,15 @@ static void handle_stripe5(struct stripe_head *sh) spin_unlock_irq(&conf->device_lock); rcu_read_unlock(); - if (unlikely(blocked_rdev)) { + if (unlikely(s.blocked_rdev)) { if (s.syncing || s.expanding || s.expanded || s.to_write || s.written) { set_bit(STRIPE_HANDLE, &sh->state); goto unlock; } /* There is nothing for the blocked_rdev to block */ - rdev_dec_pending(blocked_rdev, conf->mddev); - blocked_rdev = NULL; + rdev_dec_pending(s.blocked_rdev, conf->mddev); + s.blocked_rdev = NULL; } if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { @@ -3112,7 +3109,7 @@ static void handle_stripe5(struct stripe_head *sh) * need to be failed */ if (s.failed > 1 && s.to_read+s.to_write+s.written) - handle_failed_stripe(conf, sh, &s, disks, &return_bi); + handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); if (s.failed > 1 && s.syncing) { md_done_sync(conf->mddev, STRIPE_SECTORS,0); clear_bit(STRIPE_SYNCING, &sh->state); @@ -3128,7 +3125,7 @@ static void handle_stripe5(struct stripe_head *sh) !test_bit(R5_LOCKED, &dev->flags) && test_bit(R5_UPTODATE, &dev->flags)) || (s.failed == 1 && s.failed_num[0] == sh->pd_idx))) - handle_stripe_clean_event(conf, sh, disks, &return_bi); + handle_stripe_clean_event(conf, sh, disks, &s.return_bi); /* Now we might consider reading some blocks, either to check/generate * parity, or to satisfy requests @@ -3166,7 +3163,7 @@ static void handle_stripe5(struct stripe_head *sh) } } if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - dec_preread_active = 1; + s.dec_preread_active = 1; } /* Now to consider new write requests and what else, if anything @@ -3264,15 +3261,15 @@ static void handle_stripe5(struct stripe_head *sh) unlock: /* wait for this device to become unblocked */ - if (unlikely(blocked_rdev)) - md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); + if (unlikely(s.blocked_rdev)) + md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); if (s.ops_request) raid_run_ops(sh, s.ops_request); ops_run_io(sh, &s); - if (dec_preread_active) { + if (s.dec_preread_active) { /* We delay this until after ops_run_io so that if make_request * is waiting on a 
flush, it won't continue until the writes * have actually been submitted. @@ -3282,19 +3279,16 @@ static void handle_stripe5(struct stripe_head *sh) IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); } - return_io(return_bi); + return_io(s.return_bi); } static void handle_stripe6(struct stripe_head *sh) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks; - struct bio *return_bi = NULL; int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; struct stripe_head_state s; struct r5dev *dev, *pdev, *qdev; - mdk_rdev_t *blocked_rdev = NULL; - int dec_preread_active = 0; pr_debug("handling stripe %llu, state=%#lx cnt=%d, " "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", @@ -3345,9 +3339,9 @@ static void handle_stripe6(struct stripe_head *sh) if (dev->written) s.written++; rdev = rcu_dereference(conf->disks[i].rdev); - if (blocked_rdev == NULL && + if (s.blocked_rdev == NULL && rdev && unlikely(test_bit(Blocked, &rdev->flags))) { - blocked_rdev = rdev; + s.blocked_rdev = rdev; atomic_inc(&rdev->nr_pending); } clear_bit(R5_Insync, &dev->flags); @@ -3376,15 +3370,15 @@ static void handle_stripe6(struct stripe_head *sh) spin_unlock_irq(&conf->device_lock); rcu_read_unlock(); - if (unlikely(blocked_rdev)) { + if (unlikely(s.blocked_rdev)) { if (s.syncing || s.expanding || s.expanded || s.to_write || s.written) { set_bit(STRIPE_HANDLE, &sh->state); goto unlock; } /* There is nothing for the blocked_rdev to block */ - rdev_dec_pending(blocked_rdev, conf->mddev); - blocked_rdev = NULL; + rdev_dec_pending(s.blocked_rdev, conf->mddev); + s.blocked_rdev = NULL; } if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { @@ -3400,7 +3394,7 @@ static void handle_stripe6(struct stripe_head *sh) * might need to be failed */ if (s.failed > 2 && s.to_read+s.to_write+s.written) - handle_failed_stripe(conf, sh, &s, disks, &return_bi); + handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); if (s.failed > 2 && s.syncing) { md_done_sync(conf->mddev, STRIPE_SECTORS,0); clear_bit(STRIPE_SYNCING, &sh->state); @@ -3425,7 +3419,7 @@ static void handle_stripe6(struct stripe_head *sh) (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) && !test_bit(R5_LOCKED, &qdev->flags) && test_bit(R5_UPTODATE, &qdev->flags))))) - handle_stripe_clean_event(conf, sh, disks, &return_bi); + handle_stripe_clean_event(conf, sh, disks, &s.return_bi); /* Now we might consider reading some blocks, either to check/generate * parity, or to satisfy requests @@ -3461,7 +3455,7 @@ static void handle_stripe6(struct stripe_head *sh) } } if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - dec_preread_active = 1; + s.dec_preread_active = 1; } /* Now to consider new write requests and what else, if anything @@ -3561,8 +3555,8 @@ static void handle_stripe6(struct stripe_head *sh) unlock: /* wait for this device to become unblocked */ - if (unlikely(blocked_rdev)) - md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); + if (unlikely(s.blocked_rdev)) + md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); if (s.ops_request) raid_run_ops(sh, s.ops_request); @@ -3570,7 +3564,7 @@ static void handle_stripe6(struct stripe_head *sh) ops_run_io(sh, &s); - if (dec_preread_active) { + if (s.dec_preread_active) { /* We delay this until after ops_run_io so that if make_request * is waiting on a flush, it won't continue until the writes * have actually been submitted. 
@@ -3581,7 +3575,7 @@ static void handle_stripe6(struct stripe_head *sh) md_wakeup_thread(conf->mddev->thread); } - return_io(return_bi); + return_io(s.return_bi); } static void handle_stripe(struct stripe_head *sh) diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 05ac5cde3707..68c500af1108 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -243,8 +243,12 @@ struct stripe_head_state { int locked, uptodate, to_read, to_write, failed, written; int to_fill, compute, req_compute, non_overwrite; int failed_num[2]; - unsigned long ops_request; int p_failed, q_failed; + int dec_preread_active; + unsigned long ops_request; + + struct bio *return_bi; + mdk_rdev_t *blocked_rdev; }; /* Flags */ -- cgit v1.2.3 From cc94015a9eac5d511fe9b716624d8fdf9c6e64b2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 26 Jul 2011 11:35:35 +1000 Subject: md/raid5: move stripe_head_state and more code into handle_stripe. By defining the 'stripe_head_state' in 'handle_stripe', we can move some common code out of handle_stripe[56]() and into handle_stripe. The means that all accesses for stripe_head_state in handle_stripe[56] need to be 's->' instead of 's.', but the compiler should inline those functions and just use a direct stack reference, and future patches while hoist most of this code up into handle_stripe() so we will revert to "s.". Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 340 +++++++++++++++++++++++++---------------------------- 1 file changed, 158 insertions(+), 182 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index bc15f48be78d..36873cfc3291 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2999,24 +2999,13 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, * */ -static void handle_stripe5(struct stripe_head *sh) +static void handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks, i; - struct stripe_head_state s; struct r5dev *dev; int prexor; - memset(&s, 0, sizeof(s)); - pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " - "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, - atomic_read(&sh->count), sh->pd_idx, sh->check_state, - sh->reconstruct_state); - - s.syncing = test_bit(STRIPE_SYNCING, &sh->state); - s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); - s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); - /* Now to look around and see what can be done */ rcu_read_lock(); spin_lock_irq(&conf->device_lock); @@ -3039,25 +3028,28 @@ static void handle_stripe5(struct stripe_head *sh) set_bit(R5_Wantfill, &dev->flags); /* now count some things */ - if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; - if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; - if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; + if (test_bit(R5_LOCKED, &dev->flags)) + s->locked++; + if (test_bit(R5_UPTODATE, &dev->flags)) + s->uptodate++; + if (test_bit(R5_Wantcompute, &dev->flags)) + s->compute++; if (test_bit(R5_Wantfill, &dev->flags)) - s.to_fill++; + s->to_fill++; else if (dev->toread) - s.to_read++; + s->to_read++; if (dev->towrite) { - s.to_write++; + s->to_write++; if (!test_bit(R5_OVERWRITE, &dev->flags)) - s.non_overwrite++; + s->non_overwrite++; } if (dev->written) - s.written++; + s->written++; rdev = rcu_dereference(conf->disks[i].rdev); - if (s.blocked_rdev == NULL && + if (s->blocked_rdev == NULL && rdev && unlikely(test_bit(Blocked, &rdev->flags))) 
{ - s.blocked_rdev = rdev; + s->blocked_rdev = rdev; atomic_inc(&rdev->nr_pending); } clear_bit(R5_Insync, &dev->flags); @@ -3078,62 +3070,62 @@ static void handle_stripe5(struct stripe_head *sh) if (test_bit(R5_ReadError, &dev->flags)) clear_bit(R5_Insync, &dev->flags); if (!test_bit(R5_Insync, &dev->flags)) { - s.failed++; - s.failed_num[0] = i; + s->failed++; + s->failed_num[0] = i; } } spin_unlock_irq(&conf->device_lock); rcu_read_unlock(); - if (unlikely(s.blocked_rdev)) { - if (s.syncing || s.expanding || s.expanded || - s.to_write || s.written) { + if (unlikely(s->blocked_rdev)) { + if (s->syncing || s->expanding || s->expanded || + s->to_write || s->written) { set_bit(STRIPE_HANDLE, &sh->state); - goto unlock; + return; } /* There is nothing for the blocked_rdev to block */ - rdev_dec_pending(s.blocked_rdev, conf->mddev); - s.blocked_rdev = NULL; + rdev_dec_pending(s->blocked_rdev, conf->mddev); + s->blocked_rdev = NULL; } - if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { - set_bit(STRIPE_OP_BIOFILL, &s.ops_request); + if (s->to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { + set_bit(STRIPE_OP_BIOFILL, &s->ops_request); set_bit(STRIPE_BIOFILL_RUN, &sh->state); } pr_debug("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d\n", - s.locked, s.uptodate, s.to_read, s.to_write, - s.failed, s.failed_num[0]); + s->locked, s->uptodate, s->to_read, s->to_write, + s->failed, s->failed_num[0]); /* check if the array has lost two devices and, if so, some requests might * need to be failed */ - if (s.failed > 1 && s.to_read+s.to_write+s.written) - handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); - if (s.failed > 1 && s.syncing) { + if (s->failed > 1 && s->to_read+s->to_write+s->written) + handle_failed_stripe(conf, sh, s, disks, &s->return_bi); + if (s->failed > 1 && s->syncing) { md_done_sync(conf->mddev, STRIPE_SECTORS,0); clear_bit(STRIPE_SYNCING, &sh->state); - s.syncing = 0; + s->syncing = 0; } /* might be able to return some write requests if the parity block * is safe, or on a failed drive */ dev = &sh->dev[sh->pd_idx]; - if ( s.written && - ((test_bit(R5_Insync, &dev->flags) && - !test_bit(R5_LOCKED, &dev->flags) && - test_bit(R5_UPTODATE, &dev->flags)) || - (s.failed == 1 && s.failed_num[0] == sh->pd_idx))) - handle_stripe_clean_event(conf, sh, disks, &s.return_bi); + if (s->written && + ((test_bit(R5_Insync, &dev->flags) && + !test_bit(R5_LOCKED, &dev->flags) && + test_bit(R5_UPTODATE, &dev->flags)) || + (s->failed == 1 && s->failed_num[0] == sh->pd_idx))) + handle_stripe_clean_event(conf, sh, disks, &s->return_bi); /* Now we might consider reading some blocks, either to check/generate * parity, or to satisfy requests * or to load a block that is being partially written. 
*/ - if (s.to_read || s.non_overwrite || - (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) - handle_stripe_fill5(sh, &s, disks); + if (s->to_read || s->non_overwrite || + (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding) + handle_stripe_fill5(sh, s, disks); /* Now we check to see if any write operations have recently * completed @@ -3158,12 +3150,12 @@ static void handle_stripe5(struct stripe_head *sh) if (prexor) continue; if (!test_bit(R5_Insync, &dev->flags) || - (i == sh->pd_idx && s.failed == 0)) + (i == sh->pd_idx && s->failed == 0)) set_bit(STRIPE_INSYNC, &sh->state); } } if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - s.dec_preread_active = 1; + s->dec_preread_active = 1; } /* Now to consider new write requests and what else, if anything @@ -3172,8 +3164,8 @@ static void handle_stripe5(struct stripe_head *sh) * 2/ A 'check' operation is in flight, as it may clobber the parity * block. */ - if (s.to_write && !sh->reconstruct_state && !sh->check_state) - handle_stripe_dirtying5(conf, sh, &s, disks); + if (s->to_write && !sh->reconstruct_state && !sh->check_state) + handle_stripe_dirtying5(conf, sh, s, disks); /* maybe we need to check and possibly fix the parity for this stripe * Any reads will already have been scheduled, so we just see if enough @@ -3181,12 +3173,13 @@ static void handle_stripe5(struct stripe_head *sh) * dependent operations are in flight. */ if (sh->check_state || - (s.syncing && s.locked == 0 && + (s->syncing && s->locked == 0 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !test_bit(STRIPE_INSYNC, &sh->state))) - handle_parity_checks5(conf, sh, &s, disks); + handle_parity_checks5(conf, sh, s, disks); - if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { + if (s->syncing && s->locked == 0 + && test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS,1); clear_bit(STRIPE_SYNCING, &sh->state); } @@ -3194,22 +3187,22 @@ static void handle_stripe5(struct stripe_head *sh) /* If the failed drive is just a ReadError, then we might need to progress * the repair/check process */ - if (s.failed == 1 && !conf->mddev->ro && - test_bit(R5_ReadError, &sh->dev[s.failed_num[0]].flags) - && !test_bit(R5_LOCKED, &sh->dev[s.failed_num[0]].flags) - && test_bit(R5_UPTODATE, &sh->dev[s.failed_num[0]].flags) + if (s->failed == 1 && !conf->mddev->ro && + test_bit(R5_ReadError, &sh->dev[s->failed_num[0]].flags) + && !test_bit(R5_LOCKED, &sh->dev[s->failed_num[0]].flags) + && test_bit(R5_UPTODATE, &sh->dev[s->failed_num[0]].flags) ) { - dev = &sh->dev[s.failed_num[0]]; + dev = &sh->dev[s->failed_num[0]]; if (!test_bit(R5_ReWrite, &dev->flags)) { set_bit(R5_Wantwrite, &dev->flags); set_bit(R5_ReWrite, &dev->flags); set_bit(R5_LOCKED, &dev->flags); - s.locked++; + s->locked++; } else { /* let's read it back */ set_bit(R5_Wantread, &dev->flags); set_bit(R5_LOCKED, &dev->flags); - s.locked++; + s->locked++; } } @@ -3227,7 +3220,7 @@ static void handle_stripe5(struct stripe_head *sh) &sh2->state)) atomic_inc(&conf->preread_active_stripes); release_stripe(sh2); - goto unlock; + return; } if (sh2) release_stripe(sh2); @@ -3237,69 +3230,35 @@ static void handle_stripe5(struct stripe_head *sh) for (i = conf->raid_disks; i--; ) { set_bit(R5_Wantwrite, &sh->dev[i].flags); set_bit(R5_LOCKED, &sh->dev[i].flags); - s.locked++; + s->locked++; } } - if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && + if (s->expanded && test_bit(STRIPE_EXPANDING, &sh->state) && !sh->reconstruct_state) { /* Need to write 
out all blocks after computing parity */ sh->disks = conf->raid_disks; stripe_set_idx(sh->sector, conf, 0, sh); - schedule_reconstruction(sh, &s, 1, 1); - } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { + schedule_reconstruction(sh, s, 1, 1); + } else if (s->expanded && !sh->reconstruct_state && s->locked == 0) { clear_bit(STRIPE_EXPAND_READY, &sh->state); atomic_dec(&conf->reshape_stripes); wake_up(&conf->wait_for_overlap); md_done_sync(conf->mddev, STRIPE_SECTORS, 1); } - if (s.expanding && s.locked == 0 && + if (s->expanding && s->locked == 0 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) handle_stripe_expansion(conf, sh, NULL); - - unlock: - - /* wait for this device to become unblocked */ - if (unlikely(s.blocked_rdev)) - md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); - - if (s.ops_request) - raid_run_ops(sh, s.ops_request); - - ops_run_io(sh, &s); - - if (s.dec_preread_active) { - /* We delay this until after ops_run_io so that if make_request - * is waiting on a flush, it won't continue until the writes - * have actually been submitted. - */ - atomic_dec(&conf->preread_active_stripes); - if (atomic_read(&conf->preread_active_stripes) < - IO_THRESHOLD) - md_wakeup_thread(conf->mddev->thread); - } - return_io(s.return_bi); } -static void handle_stripe6(struct stripe_head *sh) +static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks; int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; - struct stripe_head_state s; struct r5dev *dev, *pdev, *qdev; - pr_debug("handling stripe %llu, state=%#lx cnt=%d, " - "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", - (unsigned long long)sh->sector, sh->state, - atomic_read(&sh->count), pd_idx, qd_idx, - sh->check_state, sh->reconstruct_state); - memset(&s, 0, sizeof(s)); - - s.syncing = test_bit(STRIPE_SYNCING, &sh->state); - s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); - s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); /* Now to look around and see what can be done */ rcu_read_lock(); @@ -3320,28 +3279,30 @@ static void handle_stripe6(struct stripe_head *sh) set_bit(R5_Wantfill, &dev->flags); /* now count some things */ - if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; - if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; + if (test_bit(R5_LOCKED, &dev->flags)) + s->locked++; + if (test_bit(R5_UPTODATE, &dev->flags)) + s->uptodate++; if (test_bit(R5_Wantcompute, &dev->flags)) { - s.compute++; - BUG_ON(s.compute > 2); + s->compute++; + BUG_ON(s->compute > 2); } if (test_bit(R5_Wantfill, &dev->flags)) { - s.to_fill++; + s->to_fill++; } else if (dev->toread) - s.to_read++; + s->to_read++; if (dev->towrite) { - s.to_write++; + s->to_write++; if (!test_bit(R5_OVERWRITE, &dev->flags)) - s.non_overwrite++; + s->non_overwrite++; } if (dev->written) - s.written++; + s->written++; rdev = rcu_dereference(conf->disks[i].rdev); - if (s.blocked_rdev == NULL && + if (s->blocked_rdev == NULL && rdev && unlikely(test_bit(Blocked, &rdev->flags))) { - s.blocked_rdev = rdev; + s->blocked_rdev = rdev; atomic_inc(&rdev->nr_pending); } clear_bit(R5_Insync, &dev->flags); @@ -3362,43 +3323,43 @@ static void handle_stripe6(struct stripe_head *sh) if (test_bit(R5_ReadError, &dev->flags)) clear_bit(R5_Insync, &dev->flags); if (!test_bit(R5_Insync, &dev->flags)) { - if (s.failed < 2) - s.failed_num[s.failed] = i; - s.failed++; + if (s->failed < 2) + s->failed_num[s->failed] = i; + s->failed++; } } spin_unlock_irq(&conf->device_lock); 
rcu_read_unlock(); - if (unlikely(s.blocked_rdev)) { - if (s.syncing || s.expanding || s.expanded || - s.to_write || s.written) { + if (unlikely(s->blocked_rdev)) { + if (s->syncing || s->expanding || s->expanded || + s->to_write || s->written) { set_bit(STRIPE_HANDLE, &sh->state); - goto unlock; + return; } /* There is nothing for the blocked_rdev to block */ - rdev_dec_pending(s.blocked_rdev, conf->mddev); - s.blocked_rdev = NULL; + rdev_dec_pending(s->blocked_rdev, conf->mddev); + s->blocked_rdev = NULL; } - if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { - set_bit(STRIPE_OP_BIOFILL, &s.ops_request); + if (s->to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { + set_bit(STRIPE_OP_BIOFILL, &s->ops_request); set_bit(STRIPE_BIOFILL_RUN, &sh->state); } pr_debug("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d,%d\n", - s.locked, s.uptodate, s.to_read, s.to_write, s.failed, - s.failed_num[0], s.failed_num[1]); + s->locked, s->uptodate, s->to_read, s->to_write, s->failed, + s->failed_num[0], s->failed_num[1]); /* check if the array has lost >2 devices and, if so, some requests * might need to be failed */ - if (s.failed > 2 && s.to_read+s.to_write+s.written) - handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); - if (s.failed > 2 && s.syncing) { + if (s->failed > 2 && s->to_read+s->to_write+s->written) + handle_failed_stripe(conf, sh, s, disks, &s->return_bi); + if (s->failed > 2 && s->syncing) { md_done_sync(conf->mddev, STRIPE_SECTORS,0); clear_bit(STRIPE_SYNCING, &sh->state); - s.syncing = 0; + s->syncing = 0; } /* @@ -3406,28 +3367,28 @@ static void handle_stripe6(struct stripe_head *sh) * are safe, or on a failed drive */ pdev = &sh->dev[pd_idx]; - s.p_failed = (s.failed >= 1 && s.failed_num[0] == pd_idx) - || (s.failed >= 2 && s.failed_num[1] == pd_idx); + s->p_failed = (s->failed >= 1 && s->failed_num[0] == pd_idx) + || (s->failed >= 2 && s->failed_num[1] == pd_idx); qdev = &sh->dev[qd_idx]; - s.q_failed = (s.failed >= 1 && s.failed_num[0] == qd_idx) - || (s.failed >= 2 && s.failed_num[1] == qd_idx); + s->q_failed = (s->failed >= 1 && s->failed_num[0] == qd_idx) + || (s->failed >= 2 && s->failed_num[1] == qd_idx); - if (s.written && - (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) + if (s->written && + (s->p_failed || ((test_bit(R5_Insync, &pdev->flags) && !test_bit(R5_LOCKED, &pdev->flags) && test_bit(R5_UPTODATE, &pdev->flags)))) && - (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) + (s->q_failed || ((test_bit(R5_Insync, &qdev->flags) && !test_bit(R5_LOCKED, &qdev->flags) && test_bit(R5_UPTODATE, &qdev->flags))))) - handle_stripe_clean_event(conf, sh, disks, &s.return_bi); + handle_stripe_clean_event(conf, sh, disks, &s->return_bi); /* Now we might consider reading some blocks, either to check/generate * parity, or to satisfy requests * or to load a block that is being partially written. 
*/ - if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || - (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) - handle_stripe_fill6(sh, &s, disks); + if (s->to_read || s->non_overwrite || (s->to_write && s->failed) || + (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding) + handle_stripe_fill6(sh, s, disks); /* Now we check to see if any write operations have recently * completed @@ -3450,12 +3411,12 @@ static void handle_stripe6(struct stripe_head *sh) set_bit(R5_Wantwrite, &dev->flags); if (!test_bit(R5_Insync, &dev->flags) || ((i == sh->pd_idx || i == qd_idx) && - s.failed == 0)) + s->failed == 0)) set_bit(STRIPE_INSYNC, &sh->state); } } if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - s.dec_preread_active = 1; + s->dec_preread_active = 1; } /* Now to consider new write requests and what else, if anything @@ -3464,8 +3425,8 @@ static void handle_stripe6(struct stripe_head *sh) * 2/ A 'check' operation is in flight, as it may clobber the parity * block. */ - if (s.to_write && !sh->reconstruct_state && !sh->check_state) - handle_stripe_dirtying6(conf, sh, &s, disks); + if (s->to_write && !sh->reconstruct_state && !sh->check_state) + handle_stripe_dirtying6(conf, sh, s, disks); /* maybe we need to check and possibly fix the parity for this stripe * Any reads will already have been scheduled, so we just see if enough @@ -3473,12 +3434,13 @@ static void handle_stripe6(struct stripe_head *sh) * dependent operations are in flight. */ if (sh->check_state || - (s.syncing && s.locked == 0 && + (s->syncing && s->locked == 0 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !test_bit(STRIPE_INSYNC, &sh->state))) - handle_parity_checks6(conf, sh, &s, disks); + handle_parity_checks6(conf, sh, s, disks); - if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { + if (s->syncing && s->locked == 0 + && test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS,1); clear_bit(STRIPE_SYNCING, &sh->state); } @@ -3486,9 +3448,9 @@ static void handle_stripe6(struct stripe_head *sh) /* If the failed drives are just a ReadError, then we might need * to progress the repair/check process */ - if (s.failed <= 2 && !conf->mddev->ro) - for (i = 0; i < s.failed; i++) { - dev = &sh->dev[s.failed_num[i]]; + if (s->failed <= 2 && !conf->mddev->ro) + for (i = 0; i < s->failed; i++) { + dev = &sh->dev[s->failed_num[i]]; if (test_bit(R5_ReadError, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) && test_bit(R5_UPTODATE, &dev->flags) @@ -3497,12 +3459,12 @@ static void handle_stripe6(struct stripe_head *sh) set_bit(R5_Wantwrite, &dev->flags); set_bit(R5_ReWrite, &dev->flags); set_bit(R5_LOCKED, &dev->flags); - s.locked++; + s->locked++; } else { /* let's read it back */ set_bit(R5_Wantread, &dev->flags); set_bit(R5_LOCKED, &dev->flags); - s.locked++; + s->locked++; } } } @@ -3514,11 +3476,11 @@ static void handle_stripe6(struct stripe_head *sh) for (i = conf->raid_disks; i--; ) { set_bit(R5_Wantwrite, &sh->dev[i].flags); set_bit(R5_LOCKED, &sh->dev[i].flags); - s.locked++; + s->locked++; } } - if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && + if (s->expanded && test_bit(STRIPE_EXPANDING, &sh->state) && !sh->reconstruct_state) { struct stripe_head *sh2 = get_active_stripe(conf, sh->sector, 1, 1, 1); @@ -3532,7 +3494,7 @@ static void handle_stripe6(struct stripe_head *sh) &sh2->state)) atomic_inc(&conf->preread_active_stripes); release_stripe(sh2); - goto unlock; + return; } if (sh2) release_stripe(sh2); @@ -3540,19 +3502,54 
@@ static void handle_stripe6(struct stripe_head *sh) /* Need to write out all blocks after computing P&Q */ sh->disks = conf->raid_disks; stripe_set_idx(sh->sector, conf, 0, sh); - schedule_reconstruction(sh, &s, 1, 1); - } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { + schedule_reconstruction(sh, s, 1, 1); + } else if (s->expanded && !sh->reconstruct_state && s->locked == 0) { clear_bit(STRIPE_EXPAND_READY, &sh->state); atomic_dec(&conf->reshape_stripes); wake_up(&conf->wait_for_overlap); md_done_sync(conf->mddev, STRIPE_SECTORS, 1); } - if (s.expanding && s.locked == 0 && + if (s->expanding && s->locked == 0 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) - handle_stripe_expansion(conf, sh, &s); + handle_stripe_expansion(conf, sh, s); +} + +static void handle_stripe(struct stripe_head *sh) +{ + struct stripe_head_state s; + raid5_conf_t *conf = sh->raid_conf; + + clear_bit(STRIPE_HANDLE, &sh->state); + if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { + /* already being handled, ensure it gets handled + * again when current action finishes */ + set_bit(STRIPE_HANDLE, &sh->state); + return; + } + + if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + set_bit(STRIPE_SYNCING, &sh->state); + clear_bit(STRIPE_INSYNC, &sh->state); + } + clear_bit(STRIPE_DELAYED, &sh->state); + + pr_debug("handling stripe %llu, state=%#lx cnt=%d, " + "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", + (unsigned long long)sh->sector, sh->state, + atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, + sh->check_state, sh->reconstruct_state); + memset(&s, 0, sizeof(s)); + + s.syncing = test_bit(STRIPE_SYNCING, &sh->state); + s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); + s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); + + if (conf->level == 6) + handle_stripe6(sh, &s); + else + handle_stripe5(sh, &s); - unlock: /* wait for this device to become unblocked */ if (unlikely(s.blocked_rdev)) @@ -3576,28 +3573,7 @@ static void handle_stripe6(struct stripe_head *sh) } return_io(s.return_bi); -} -static void handle_stripe(struct stripe_head *sh) -{ - clear_bit(STRIPE_HANDLE, &sh->state); - if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { - /* already being handled, ensure it gets handled - * again when current action finishes */ - set_bit(STRIPE_HANDLE, &sh->state); - return; - } - - if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { - set_bit(STRIPE_SYNCING, &sh->state); - clear_bit(STRIPE_INSYNC, &sh->state); - } - clear_bit(STRIPE_DELAYED, &sh->state); - - if (sh->raid_conf->level == 6) - handle_stripe6(sh); - else - handle_stripe5(sh); clear_bit(STRIPE_ACTIVE, &sh->state); } -- cgit v1.2.3 From 86c374ba9f6726a79a032ede741dc66d219b166e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: Remove stripe_head_state arg from handle_stripe_expansion. This arg is only used to differentiate between RAID5 and RAID6 but that is not needed. For RAID5, raid5_compute_sector will set qd_idx to "~0" so j will certainly not equal qd_idx, so there is no need for a guard on that condition. So remove the guard and remove the arg from the declaration and callers of handle_stripe_expansion.
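To illustrate why the guard is redundant, here is a minimal sketch (the helper name is hypothetical; the real code keeps this loop inline in handle_stripe_expansion):

/* Sketch: on RAID5, qd_idx is ~0, so 'j != sh2->qd_idx' is always
 * true for any valid disk index j and needs no RAID5/RAID6 guard.
 */
static bool all_blocks_expanded(raid5_conf_t *conf, struct stripe_head *sh2)
{
        int j;

        for (j = 0; j < conf->raid_disks; j++)
                if (j != sh2->pd_idx &&
                    j != sh2->qd_idx &&  /* never a valid j on RAID5 */
                    !test_bit(R5_Expanded, &sh2->dev[j].flags))
                        return false;
        return true;
}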
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 36873cfc3291..793dd76aeae0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2921,8 +2921,7 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, } } -static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, - struct stripe_head_state *r6s) +static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh) { int i; @@ -2964,7 +2963,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); for (j = 0; j < conf->raid_disks; j++) if (j != sh2->pd_idx && - (!r6s || j != sh2->qd_idx) && + j != sh2->qd_idx && !test_bit(R5_Expanded, &sh2->dev[j].flags)) break; if (j == conf->raid_disks) { @@ -3249,7 +3248,7 @@ static void handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) if (s->expanding && s->locked == 0 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) - handle_stripe_expansion(conf, sh, NULL); + handle_stripe_expansion(conf, sh); } static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) @@ -3512,7 +3511,7 @@ static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) if (s->expanding && s->locked == 0 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) - handle_stripe_expansion(conf, sh, s); + handle_stripe_expansion(conf, sh); } static void handle_stripe(struct stripe_head *sh) -- cgit v1.2.3 From 3687c061886dd0bfec07e131ad12f916ef0abc62 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: Move code for finishing a reconstruction into handle_stripe. Prior to commit ab69ae12ceef7 the code in handle_stripe5 and handle_stripe6 to "Finish reconstruct operations initiated by the expansion process" was identical. That commit added an identical stanza of code to each function, but in different places. That was careless. The raid5 code was correct, so move that out into handle_stripe and remove raid6 version. 
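The resulting control flow, compressed into a sketch (bookkeeping and the shared stanza elided; 'done' signals the early exit taken for a blocked rdev):

static void handle_stripe(struct stripe_head *sh)
{
        raid5_conf_t *conf = sh->raid_conf;
        struct stripe_head_state s;
        int done;

        /* per-level analysis and handling */
        done = (conf->level == 6) ? handle_stripe6(sh, &s)
                                  : handle_stripe5(sh, &s);
        if (done)
                goto finish;

        /* shared tail: finish reconstruct operations initiated by
         * the expansion process (formerly duplicated per level) */
finish:
        ;       /* wait for a blocked rdev, run stripe ops, submit I/O */
}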
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 153 ++++++++++++++++++++--------------------------------- 1 file changed, 57 insertions(+), 96 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 793dd76aeae0..cd6f04f145e6 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2998,7 +2998,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh) * */ -static void handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) +static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks, i; @@ -3080,7 +3080,7 @@ static void handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) if (s->syncing || s->expanding || s->expanded || s->to_write || s->written) { set_bit(STRIPE_HANDLE, &sh->state); - return; + return 1; } /* There is nothing for the blocked_rdev to block */ rdev_dec_pending(s->blocked_rdev, conf->mddev); @@ -3204,54 +3204,10 @@ static void handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) s->locked++; } } - - /* Finish reconstruct operations initiated by the expansion process */ - if (sh->reconstruct_state == reconstruct_state_result) { - struct stripe_head *sh2 - = get_active_stripe(conf, sh->sector, 1, 1, 1); - if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { - /* sh cannot be written until sh2 has been read. - * so arrange for sh to be delayed a little - */ - set_bit(STRIPE_DELAYED, &sh->state); - set_bit(STRIPE_HANDLE, &sh->state); - if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, - &sh2->state)) - atomic_inc(&conf->preread_active_stripes); - release_stripe(sh2); - return; - } - if (sh2) - release_stripe(sh2); - - sh->reconstruct_state = reconstruct_state_idle; - clear_bit(STRIPE_EXPANDING, &sh->state); - for (i = conf->raid_disks; i--; ) { - set_bit(R5_Wantwrite, &sh->dev[i].flags); - set_bit(R5_LOCKED, &sh->dev[i].flags); - s->locked++; - } - } - - if (s->expanded && test_bit(STRIPE_EXPANDING, &sh->state) && - !sh->reconstruct_state) { - /* Need to write out all blocks after computing parity */ - sh->disks = conf->raid_disks; - stripe_set_idx(sh->sector, conf, 0, sh); - schedule_reconstruction(sh, s, 1, 1); - } else if (s->expanded && !sh->reconstruct_state && s->locked == 0) { - clear_bit(STRIPE_EXPAND_READY, &sh->state); - atomic_dec(&conf->reshape_stripes); - wake_up(&conf->wait_for_overlap); - md_done_sync(conf->mddev, STRIPE_SECTORS, 1); - } - - if (s->expanding && s->locked == 0 && - !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) - handle_stripe_expansion(conf, sh); + return 0; } -static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) +static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks; @@ -3334,7 +3290,7 @@ static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) if (s->syncing || s->expanding || s->expanded || s->to_write || s->written) { set_bit(STRIPE_HANDLE, &sh->state); - return; + return 1; } /* There is nothing for the blocked_rdev to block */ rdev_dec_pending(s->blocked_rdev, conf->mddev); @@ -3467,56 +3423,14 @@ static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) } } } - - /* Finish reconstruct operations initiated by the expansion process */ - if (sh->reconstruct_state == reconstruct_state_result) { - sh->reconstruct_state = reconstruct_state_idle; - clear_bit(STRIPE_EXPANDING, 
&sh->state); - for (i = conf->raid_disks; i--; ) { - set_bit(R5_Wantwrite, &sh->dev[i].flags); - set_bit(R5_LOCKED, &sh->dev[i].flags); - s->locked++; - } - } - - if (s->expanded && test_bit(STRIPE_EXPANDING, &sh->state) && - !sh->reconstruct_state) { - struct stripe_head *sh2 - = get_active_stripe(conf, sh->sector, 1, 1, 1); - if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { - /* sh cannot be written until sh2 has been read. - * so arrange for sh to be delayed a little - */ - set_bit(STRIPE_DELAYED, &sh->state); - set_bit(STRIPE_HANDLE, &sh->state); - if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, - &sh2->state)) - atomic_inc(&conf->preread_active_stripes); - release_stripe(sh2); - return; - } - if (sh2) - release_stripe(sh2); - - /* Need to write out all blocks after computing P&Q */ - sh->disks = conf->raid_disks; - stripe_set_idx(sh->sector, conf, 0, sh); - schedule_reconstruction(sh, s, 1, 1); - } else if (s->expanded && !sh->reconstruct_state && s->locked == 0) { - clear_bit(STRIPE_EXPAND_READY, &sh->state); - atomic_dec(&conf->reshape_stripes); - wake_up(&conf->wait_for_overlap); - md_done_sync(conf->mddev, STRIPE_SECTORS, 1); - } - - if (s->expanding && s->locked == 0 && - !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) - handle_stripe_expansion(conf, sh); + return 0; } static void handle_stripe(struct stripe_head *sh) { struct stripe_head_state s; + int done; + int i; raid5_conf_t *conf = sh->raid_conf; clear_bit(STRIPE_HANDLE, &sh->state); @@ -3545,11 +3459,58 @@ static void handle_stripe(struct stripe_head *sh) s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); if (conf->level == 6) - handle_stripe6(sh, &s); + done = handle_stripe6(sh, &s); else - handle_stripe5(sh, &s); + done = handle_stripe5(sh, &s); + + if (done) + goto finish; + /* Finish reconstruct operations initiated by the expansion process */ + if (sh->reconstruct_state == reconstruct_state_result) { + struct stripe_head *sh_src + = get_active_stripe(conf, sh->sector, 1, 1, 1); + if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { + /* sh cannot be written until sh_src has been read. 
+ * so arrange for sh to be delayed a little + */ + set_bit(STRIPE_DELAYED, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); + if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, + &sh_src->state)) + atomic_inc(&conf->preread_active_stripes); + release_stripe(sh_src); + goto finish; + } + if (sh_src) + release_stripe(sh_src); + + sh->reconstruct_state = reconstruct_state_idle; + clear_bit(STRIPE_EXPANDING, &sh->state); + for (i = conf->raid_disks; i--; ) { + set_bit(R5_Wantwrite, &sh->dev[i].flags); + set_bit(R5_LOCKED, &sh->dev[i].flags); + s.locked++; + } + } + if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && + !sh->reconstruct_state) { + /* Need to write out all blocks after computing parity */ + sh->disks = conf->raid_disks; + stripe_set_idx(sh->sector, conf, 0, sh); + schedule_reconstruction(sh, &s, 1, 1); + } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { + clear_bit(STRIPE_EXPAND_READY, &sh->state); + atomic_dec(&conf->reshape_stripes); + wake_up(&conf->wait_for_overlap); + md_done_sync(conf->mddev, STRIPE_SECTORS, 1); + } + + if (s.expanding && s.locked == 0 && + !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) + handle_stripe_expansion(conf, sh); +finish: /* wait for this device to become unblocked */ if (unlikely(s.blocked_rdev)) md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); -- cgit v1.2.3 From c5a3100062cf277d3edd4e6f4a1f1e403524b464 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: move more code into common handle_stripe The difference between the RAID5 and RAID6 code here is easily resolved using conf->max_degraded. Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 90 +++++++++++++++++++----------------------------------- 1 file changed, 32 insertions(+), 58 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index cd6f04f145e6..74a575bbd25c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3176,34 +3176,6 @@ static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !test_bit(STRIPE_INSYNC, &sh->state))) handle_parity_checks5(conf, sh, s, disks); - - if (s->syncing && s->locked == 0 - && test_bit(STRIPE_INSYNC, &sh->state)) { - md_done_sync(conf->mddev, STRIPE_SECTORS,1); - clear_bit(STRIPE_SYNCING, &sh->state); - } - - /* If the failed drive is just a ReadError, then we might need to progress - * the repair/check process - */ - if (s->failed == 1 && !conf->mddev->ro && - test_bit(R5_ReadError, &sh->dev[s->failed_num[0]].flags) - && !test_bit(R5_LOCKED, &sh->dev[s->failed_num[0]].flags) - && test_bit(R5_UPTODATE, &sh->dev[s->failed_num[0]].flags) - ) { - dev = &sh->dev[s->failed_num[0]]; - if (!test_bit(R5_ReWrite, &dev->flags)) { - set_bit(R5_Wantwrite, &dev->flags); - set_bit(R5_ReWrite, &dev->flags); - set_bit(R5_LOCKED, &dev->flags); - s->locked++; - } else { - /* let's read it back */ - set_bit(R5_Wantread, &dev->flags); - set_bit(R5_LOCKED, &dev->flags); - s->locked++; - } - } return 0; } @@ -3393,36 +3365,6 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !test_bit(STRIPE_INSYNC, &sh->state))) handle_parity_checks6(conf, sh, s, disks); - - if (s->syncing && s->locked == 0 - && test_bit(STRIPE_INSYNC, &sh->state)) { - md_done_sync(conf->mddev, STRIPE_SECTORS,1); - clear_bit(STRIPE_SYNCING, &sh->state); - } - - /* If the failed drives are just a ReadError, then we might need - * 
to progress the repair/check process - */ - if (s->failed <= 2 && !conf->mddev->ro) - for (i = 0; i < s->failed; i++) { - dev = &sh->dev[s->failed_num[i]]; - if (test_bit(R5_ReadError, &dev->flags) - && !test_bit(R5_LOCKED, &dev->flags) - && test_bit(R5_UPTODATE, &dev->flags) - ) { - if (!test_bit(R5_ReWrite, &dev->flags)) { - set_bit(R5_Wantwrite, &dev->flags); - set_bit(R5_ReWrite, &dev->flags); - set_bit(R5_LOCKED, &dev->flags); - s->locked++; - } else { - /* let's read it back */ - set_bit(R5_Wantread, &dev->flags); - set_bit(R5_LOCKED, &dev->flags); - s->locked++; - } - } - } return 0; } @@ -3465,6 +3407,38 @@ static void handle_stripe(struct stripe_head *sh) if (done) goto finish; + + + if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { + md_done_sync(conf->mddev, STRIPE_SECTORS, 1); + clear_bit(STRIPE_SYNCING, &sh->state); + } + + /* If the failed drives are just a ReadError, then we might need + * to progress the repair/check process + */ + if (s.failed <= conf->max_degraded && !conf->mddev->ro) + for (i = 0; i < s.failed; i++) { + struct r5dev *dev = &sh->dev[s.failed_num[i]]; + if (test_bit(R5_ReadError, &dev->flags) + && !test_bit(R5_LOCKED, &dev->flags) + && test_bit(R5_UPTODATE, &dev->flags) + ) { + if (!test_bit(R5_ReWrite, &dev->flags)) { + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_ReWrite, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + s.locked++; + } else { + /* let's read it back */ + set_bit(R5_Wantread, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + s.locked++; + } + } + } + + /* Finish reconstruct operations initiated by the expansion process */ if (sh->reconstruct_state == reconstruct_state_result) { struct stripe_head *sh_src -- cgit v1.2.3 From 5d35e09cae47bbae2739f432658860680de21866 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: rearrange a test in fetch_block6. Next patch will unite fetch_block5 and fetch_block6. First I want to make the differences a little more clear. For RAID6 if we are writing at all and there is a failed device, then we need to load or compute every block so we can do a reconstruct-write. This case isn't needed for RAID5 - we will do a read-modify-write in that case. So make that test a separate test in fetch_block6 rather than merged with two other tests. Make a similar change in fetch_block5 so the one bit that is not needed for RAID6 is clearly separate. 
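Written out as standalone predicates (hypothetical helper names; the real code keeps both tests inline in fetch_block5 and fetch_block6), the level-specific clauses being separated are:

/* RAID5: with a failed device, only a partial (non-overwrite) write
 * on that device forces this block to be loaded, because RAID5 can
 * otherwise fall back to read-modify-write.
 */
static bool degraded_write_needs_block5(struct stripe_head_state *s,
                                        struct r5dev *fdev)
{
        return s->failed && fdev->towrite &&
               !test_bit(R5_OVERWRITE, &fdev->flags);
}

/* RAID6: any write while degraded means a reconstruct-write, so
 * every block must be loaded or computed.
 */
static bool degraded_write_needs_block6(struct stripe_head_state *s)
{
        return s->failed && s->to_write;
}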
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 74a575bbd25c..a3d7cd96cfb7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2326,9 +2326,8 @@ static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, (dev->toread || (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || s->syncing || s->expanding || - (s->failed && - (failed_dev->toread || - (failed_dev->towrite && + (s->failed && failed_dev->toread) || + (s->failed && failed_dev->towrite && !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { /* We would like to get this block, possibly by computing it, * otherwise read it if the backing disk is insync @@ -2399,10 +2398,9 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, (dev->toread || (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || s->syncing || s->expanding || - (s->failed >= 1 && - (fdev[0]->toread || s->to_write)) || - (s->failed >= 2 && - (fdev[1]->toread || s->to_write)))) { + (s->failed >= 1 && fdev[0]->toread) || + (s->failed >= 2 && fdev[1]->toread) || + (s->failed && s->to_write)) { /* we would like to get this block, possibly by computing it, * otherwise read it if the backing disk is insync */ -- cgit v1.2.3 From 93b3dbce6456a79c545b45e86ccc2244e923cc99 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: unite fetch_block5 and fetch_block6 Provided that ->failed_num[1] is not a valid device number (which is easily achieved) fetch_block6 provides all the functionality of fetch_block5. So remove the latter and rename the former to simply "fetch_block". Then handle_stripe_fill5 and handle_stripe_fill6 become the same and can similarly be united. Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 107 ++++++++++++----------------------------------------- 1 file changed, 23 insertions(+), 84 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a3d7cd96cfb7..a63a679105ca 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2308,91 +2308,20 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, md_wakeup_thread(conf->mddev->thread); } -/* fetch_block5 - checks the given member device to see if its data needs +/* fetch_block - checks the given member device to see if its data needs * to be read or computed to satisfy a request. * * Returns 1 when no more member devices need to be checked, otherwise returns - * 0 to tell the loop in handle_stripe_fill5 to continue + * 0 to tell the loop in handle_stripe_fill to continue */ -static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, - int disk_idx, int disks) -{ - struct r5dev *dev = &sh->dev[disk_idx]; - struct r5dev *failed_dev = &sh->dev[s->failed_num[0]]; - - /* is the data in this block needed, and can we get it? 
*/ - if (!test_bit(R5_LOCKED, &dev->flags) && - !test_bit(R5_UPTODATE, &dev->flags) && - (dev->toread || - (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || - s->syncing || s->expanding || - (s->failed && failed_dev->toread) || - (s->failed && failed_dev->towrite && - !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { - /* We would like to get this block, possibly by computing it, - * otherwise read it if the backing disk is insync - */ - if ((s->uptodate == disks - 1) && - (s->failed && disk_idx == s->failed_num[0])) { - set_bit(STRIPE_COMPUTE_RUN, &sh->state); - set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); - set_bit(R5_Wantcompute, &dev->flags); - sh->ops.target = disk_idx; - sh->ops.target2 = -1; - s->req_compute = 1; - /* Careful: from this point on 'uptodate' is in the eye - * of raid_run_ops which services 'compute' operations - * before writes. R5_Wantcompute flags a block that will - * be R5_UPTODATE by the time it is needed for a - * subsequent operation. - */ - s->uptodate++; - return 1; /* uptodate + compute == disks */ - } else if (test_bit(R5_Insync, &dev->flags)) { - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantread, &dev->flags); - s->locked++; - pr_debug("Reading block %d (sync=%d)\n", disk_idx, - s->syncing); - } - } - - return 0; -} - -/** - * handle_stripe_fill5 - read or compute data to satisfy pending requests. - */ -static void handle_stripe_fill5(struct stripe_head *sh, - struct stripe_head_state *s, int disks) -{ - int i; - - /* look for blocks to read/compute, skip this if a compute - * is already in flight, or if the stripe contents are in the - * midst of changing due to a write - */ - if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && - !sh->reconstruct_state) - for (i = disks; i--; ) - if (fetch_block5(sh, s, i, disks)) - break; - set_bit(STRIPE_HANDLE, &sh->state); -} - -/* fetch_block6 - checks the given member device to see if its data needs - * to be read or computed to satisfy a request. - * - * Returns 1 when no more member devices need to be checked, otherwise returns - * 0 to tell the loop in handle_stripe_fill6 to continue - */ -static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, - int disk_idx, int disks) +static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, + int disk_idx, int disks) { struct r5dev *dev = &sh->dev[disk_idx]; struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], &sh->dev[s->failed_num[1]] }; + /* is the data in this block needed, and can we get it? */ if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread || @@ -2400,7 +2329,9 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, s->syncing || s->expanding || (s->failed >= 1 && fdev[0]->toread) || (s->failed >= 2 && fdev[1]->toread) || - (s->failed && s->to_write)) { + (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && + !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || + (sh->raid_conf->level == 6 && s->failed && s->to_write))) { /* we would like to get this block, possibly by computing it, * otherwise read it if the backing disk is insync */ @@ -2420,6 +2351,12 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, sh->ops.target = disk_idx; sh->ops.target2 = -1; /* no 2nd target */ s->req_compute = 1; + /* Careful: from this point on 'uptodate' is in the eye + * of raid_run_ops which services 'compute' operations + * before writes. 
R5_Wantcompute flags a block that will + * be R5_UPTODATE by the time it is needed for a + * subsequent operation. + */ s->uptodate++; return 1; } else if (s->uptodate == disks-2 && s->failed >= 2) { @@ -2460,11 +2397,11 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, } /** - * handle_stripe_fill6 - read or compute data to satisfy pending requests. + * handle_stripe_fill - read or compute data to satisfy pending requests. */ -static void handle_stripe_fill6(struct stripe_head *sh, - struct stripe_head_state *s, - int disks) +static void handle_stripe_fill(struct stripe_head *sh, + struct stripe_head_state *s, + int disks) { int i; @@ -2475,7 +2412,7 @@ static void handle_stripe_fill6(struct stripe_head *sh, if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && !sh->reconstruct_state) for (i = disks; i--; ) - if (fetch_block6(sh, s, i, disks)) + if (fetch_block(sh, s, i, disks)) break; set_bit(STRIPE_HANDLE, &sh->state); } @@ -3122,7 +3059,7 @@ static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) */ if (s->to_read || s->non_overwrite || (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding) - handle_stripe_fill5(sh, s, disks); + handle_stripe_fill(sh, s, disks); /* Now we check to see if any write operations have recently * completed @@ -3313,7 +3250,7 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) */ if (s->to_read || s->non_overwrite || (s->to_write && s->failed) || (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding) - handle_stripe_fill6(sh, s, disks); + handle_stripe_fill(sh, s, disks); /* Now we check to see if any write operations have recently * completed @@ -3397,6 +3334,8 @@ static void handle_stripe(struct stripe_head *sh) s.syncing = test_bit(STRIPE_SYNCING, &sh->state); s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); + s.failed_num[0] = -1; + s.failed_num[1] = -1; if (conf->level == 6) done = handle_stripe6(sh, &s); -- cgit v1.2.3 From c8ac1803ff0af5aa614587ac0c66d46b7a3bdfcc Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: unite handle_stripe_dirtying5 and handle_stripe_dirtying6 RAID6 is only allowed to choose 'reconstruct-write' while RAID5 may also choose 'read-modify-write'. Apart from this difference, handle_stripe_dirtying[56] are nearly identical. So resolve these differences and create just one function.
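The decision the united function preserves, in outline (a sketch with a hypothetical helper; the fake costs for RAID6 are exactly what the diff below installs, and the real code keeps this inline and also issues the reads):

static void choose_write_scheme(raid5_conf_t *conf, struct stripe_head *sh,
                                int *rmw, int *rcw)
{
        if (conf->max_degraded == 2) {
                /* RAID6 cannot read-modify-write: fake the costs so
                 * reconstruct-write always wins the comparison */
                *rcw = 1;
                *rmw = 2;
                return;
        }
        /* RAID5: count the reads each scheme would need and later
         * pick whichever touches fewer disks:
         * rmw < rcw  => read old data + old parity, xor in new data
         * otherwise  => read blocks not being overwritten, recompute parity
         */
        *rmw = count_rmw_reads(sh);     /* hypothetical helpers */
        *rcw = count_rcw_reads(sh);
}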
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 77 +++++++++++++++--------------------------------------- 1 file changed, 21 insertions(+), 56 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a63a679105ca..a3018970d6a3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2468,11 +2468,19 @@ static void handle_stripe_clean_event(raid5_conf_t *conf, md_wakeup_thread(conf->mddev->thread); } -static void handle_stripe_dirtying5(raid5_conf_t *conf, - struct stripe_head *sh, struct stripe_head_state *s, int disks) +static void handle_stripe_dirtying(raid5_conf_t *conf, + struct stripe_head *sh, + struct stripe_head_state *s, + int disks) { int rmw = 0, rcw = 0, i; - for (i = disks; i--; ) { + if (conf->max_degraded == 2) { + /* RAID6 requires 'rcw' in current implementation + * Calculate the real rcw later - for now fake it + * look like rcw is cheaper + */ + rcw = 1; rmw = 2; + } else for (i = disks; i--; ) { /* would I have to read this buffer for read_modify_write */ struct r5dev *dev = &sh->dev[i]; if ((dev->towrite || i == sh->pd_idx) && @@ -2519,16 +2527,19 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf, } } } - if (rcw <= rmw && rcw > 0) + if (rcw <= rmw && rcw > 0) { /* want reconstruct write, but need to get some data */ + rcw = 0; for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if (!test_bit(R5_OVERWRITE, &dev->flags) && - i != sh->pd_idx && + i != sh->pd_idx && i != sh->qd_idx && !test_bit(R5_LOCKED, &dev->flags) && !(test_bit(R5_UPTODATE, &dev->flags) || - test_bit(R5_Wantcompute, &dev->flags)) && - test_bit(R5_Insync, &dev->flags)) { + test_bit(R5_Wantcompute, &dev->flags))) { + rcw++; + if (!test_bit(R5_Insync, &dev->flags)) + continue; /* it's a failed drive */ if ( test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { pr_debug("Read_old block " @@ -2542,6 +2553,7 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf, } } } + } /* now if nothing is locked, and if we have enough data, * we can start a write request */ @@ -2558,53 +2570,6 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf, schedule_reconstruction(sh, s, rcw == 0, 0); } -static void handle_stripe_dirtying6(raid5_conf_t *conf, - struct stripe_head *sh, struct stripe_head_state *s, - int disks) -{ - int rcw = 0, pd_idx = sh->pd_idx, i; - int qd_idx = sh->qd_idx; - - set_bit(STRIPE_HANDLE, &sh->state); - for (i = disks; i--; ) { - struct r5dev *dev = &sh->dev[i]; - /* check if we haven't enough data */ - if (!test_bit(R5_OVERWRITE, &dev->flags) && - i != pd_idx && i != qd_idx && - !test_bit(R5_LOCKED, &dev->flags) && - !(test_bit(R5_UPTODATE, &dev->flags) || - test_bit(R5_Wantcompute, &dev->flags))) { - rcw++; - if (!test_bit(R5_Insync, &dev->flags)) - continue; /* it's a failed drive */ - - if ( - test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { - pr_debug("Read_old stripe %llu " - "block %d for Reconstruct\n", - (unsigned long long)sh->sector, i); - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantread, &dev->flags); - s->locked++; - } else { - pr_debug("Request delayed stripe %llu " - "block %d for Reconstruct\n", - (unsigned long long)sh->sector, i); - set_bit(STRIPE_DELAYED, &sh->state); - set_bit(STRIPE_HANDLE, &sh->state); - } - } - } - /* now if nothing is locked, and if we have enough data, we can start a - * write request - */ - if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && - s->locked == 0 && rcw == 0 && - !test_bit(STRIPE_BIT_DELAY, &sh->state)) { - schedule_reconstruction(sh, 
s, 1, 0); - } -} - static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) { @@ -3099,7 +3064,7 @@ static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) * block. */ if (s->to_write && !sh->reconstruct_state && !sh->check_state) - handle_stripe_dirtying5(conf, sh, s, disks); + handle_stripe_dirtying(conf, sh, s, disks); /* maybe we need to check and possibly fix the parity for this stripe * Any reads will already have been scheduled, so we just see if enough @@ -3288,7 +3253,7 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) * block. */ if (s->to_write && !sh->reconstruct_state && !sh->check_state) - handle_stripe_dirtying6(conf, sh, s, disks); + handle_stripe_dirtying(conf, sh, s, disks); /* maybe we need to check and possibly fix the parity for this stripe * Any reads will already have been scheduled, so we just see if enough -- cgit v1.2.3 From 84789554e96c0263ad8aa9be91397ece1f88c768 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: move more common code into handle_stripe Apart from 'prexor' which can only be set for RAID5, and 'qd_idx' which can only be meaningful for RAID6, these two chunks of code are nearly the same. So combine them into one adding a test to call either handle_parity_checks5 or handle_parity_checks6 as appropriate. Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 161 ++++++++++++++++++++--------------------------------- 1 file changed, 61 insertions(+), 100 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a3018970d6a3..cf60b15b4e3a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1756,7 +1756,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, /* * Select the parity disk based on the user selected algorithm. 
*/ - pd_idx = qd_idx = ~0; + pd_idx = qd_idx = -1; switch(conf->level) { case 4: pd_idx = data_disks; @@ -2903,7 +2903,6 @@ static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks, i; struct r5dev *dev; - int prexor; /* Now to look around and see what can be done */ rcu_read_lock(); @@ -3026,56 +3025,6 @@ static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding) handle_stripe_fill(sh, s, disks); - /* Now we check to see if any write operations have recently - * completed - */ - prexor = 0; - if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) - prexor = 1; - if (sh->reconstruct_state == reconstruct_state_drain_result || - sh->reconstruct_state == reconstruct_state_prexor_drain_result) { - sh->reconstruct_state = reconstruct_state_idle; - - /* All the 'written' buffers and the parity block are ready to - * be written back to disk - */ - BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); - for (i = disks; i--; ) { - dev = &sh->dev[i]; - if (test_bit(R5_LOCKED, &dev->flags) && - (i == sh->pd_idx || dev->written)) { - pr_debug("Writing block %d\n", i); - set_bit(R5_Wantwrite, &dev->flags); - if (prexor) - continue; - if (!test_bit(R5_Insync, &dev->flags) || - (i == sh->pd_idx && s->failed == 0)) - set_bit(STRIPE_INSYNC, &sh->state); - } - } - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - s->dec_preread_active = 1; - } - - /* Now to consider new write requests and what else, if anything - * should be read. We do not handle new writes when: - * 1/ A 'write' operation (copy+xor) is already in flight. - * 2/ A 'check' operation is in flight, as it may clobber the parity - * block. - */ - if (s->to_write && !sh->reconstruct_state && !sh->check_state) - handle_stripe_dirtying(conf, sh, s, disks); - - /* maybe we need to check and possibly fix the parity for this stripe - * Any reads will already have been scheduled, so we just see if enough - * data is available. The parity check is held off while parity - * dependent operations are in flight. 
- */ - if (sh->check_state || - (s->syncing && s->locked == 0 && - !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && - !test_bit(STRIPE_INSYNC, &sh->state))) - handle_parity_checks5(conf, sh, s, disks); return 0; } @@ -3217,54 +3166,6 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding) handle_stripe_fill(sh, s, disks); - /* Now we check to see if any write operations have recently - * completed - */ - if (sh->reconstruct_state == reconstruct_state_drain_result) { - - sh->reconstruct_state = reconstruct_state_idle; - /* All the 'written' buffers and the parity blocks are ready to - * be written back to disk - */ - BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); - BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags)); - for (i = disks; i--; ) { - dev = &sh->dev[i]; - if (test_bit(R5_LOCKED, &dev->flags) && - (i == sh->pd_idx || i == qd_idx || - dev->written)) { - pr_debug("Writing block %d\n", i); - BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); - set_bit(R5_Wantwrite, &dev->flags); - if (!test_bit(R5_Insync, &dev->flags) || - ((i == sh->pd_idx || i == qd_idx) && - s->failed == 0)) - set_bit(STRIPE_INSYNC, &sh->state); - } - } - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - s->dec_preread_active = 1; - } - - /* Now to consider new write requests and what else, if anything - * should be read. We do not handle new writes when: - * 1/ A 'write' operation (copy+gen_syndrome) is already in flight. - * 2/ A 'check' operation is in flight, as it may clobber the parity - * block. - */ - if (s->to_write && !sh->reconstruct_state && !sh->check_state) - handle_stripe_dirtying(conf, sh, s, disks); - - /* maybe we need to check and possibly fix the parity for this stripe - * Any reads will already have been scheduled, so we just see if enough - * data is available. The parity check is held off while parity - * dependent operations are in flight. 
- */ - if (sh->check_state || - (s->syncing && s->locked == 0 && - !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && - !test_bit(STRIPE_INSYNC, &sh->state))) - handle_parity_checks6(conf, sh, s, disks); return 0; } @@ -3273,6 +3174,8 @@ static void handle_stripe(struct stripe_head *sh) struct stripe_head_state s; int done; int i; + int prexor; + int disks = sh->disks; raid5_conf_t *conf = sh->raid_conf; clear_bit(STRIPE_HANDLE, &sh->state); @@ -3310,6 +3213,64 @@ static void handle_stripe(struct stripe_head *sh) if (done) goto finish; + /* Now we check to see if any write operations have recently + * completed + */ + prexor = 0; + if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) + prexor = 1; + if (sh->reconstruct_state == reconstruct_state_drain_result || + sh->reconstruct_state == reconstruct_state_prexor_drain_result) { + sh->reconstruct_state = reconstruct_state_idle; + + /* All the 'written' buffers and the parity block are ready to + * be written back to disk + */ + BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); + BUG_ON(sh->qd_idx >= 0 && + !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags)); + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (test_bit(R5_LOCKED, &dev->flags) && + (i == sh->pd_idx || i == sh->qd_idx || + dev->written)) { + pr_debug("Writing block %d\n", i); + set_bit(R5_Wantwrite, &dev->flags); + if (prexor) + continue; + if (!test_bit(R5_Insync, &dev->flags) || + ((i == sh->pd_idx || i == sh->qd_idx) && + s.failed == 0)) + set_bit(STRIPE_INSYNC, &sh->state); + } + } + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + s.dec_preread_active = 1; + } + + /* Now to consider new write requests and what else, if anything + * should be read. We do not handle new writes when: + * 1/ A 'write' operation (copy+xor) is already in flight. + * 2/ A 'check' operation is in flight, as it may clobber the parity + * block. + */ + if (s.to_write && !sh->reconstruct_state && !sh->check_state) + handle_stripe_dirtying(conf, sh, &s, disks); + + /* maybe we need to check and possibly fix the parity for this stripe + * Any reads will already have been scheduled, so we just see if enough + * data is available. The parity check is held off while parity + * dependent operations are in flight. + */ + if (sh->check_state || + (s.syncing && s.locked == 0 && + !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && + !test_bit(STRIPE_INSYNC, &sh->state))) { + if (conf->level == 6) + handle_parity_checks6(conf, sh, &s, disks); + else + handle_parity_checks5(conf, sh, &s, disks); + } if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS, 1); -- cgit v1.2.3 From 474af965fe0005b334cabdb2904a7d712c21489b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: move some more common code into handle_stripe The RAID6 version of this code is usable for RAID5 providing: - we test "conf->max_degraded" rather than "2" as appropriate - we make sure s->failed_num[1] is meaningful (and not '-1') when s->failed > 1 The 'return 1' must become 'goto finish' in the new location. 
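One detail worth spelling out: RAID5 has no Q block, so for the shared clean-event test q_failed must evaluate as true on RAID5. The unified predicate, as it appears in the diff below, is:

        s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
                || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
                || conf->level < 6;     /* RAID5: no Q block to wait for */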
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 180 ++++++++++++++++++++--------------------------------- 1 file changed, 67 insertions(+), 113 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index cf60b15b4e3a..63acc51e8406 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2968,63 +2968,14 @@ static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) if (test_bit(R5_ReadError, &dev->flags)) clear_bit(R5_Insync, &dev->flags); if (!test_bit(R5_Insync, &dev->flags)) { + if (s->failed < 2) + s->failed_num[s->failed] = i; s->failed++; - s->failed_num[0] = i; } } spin_unlock_irq(&conf->device_lock); rcu_read_unlock(); - if (unlikely(s->blocked_rdev)) { - if (s->syncing || s->expanding || s->expanded || - s->to_write || s->written) { - set_bit(STRIPE_HANDLE, &sh->state); - return 1; - } - /* There is nothing for the blocked_rdev to block */ - rdev_dec_pending(s->blocked_rdev, conf->mddev); - s->blocked_rdev = NULL; - } - - if (s->to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { - set_bit(STRIPE_OP_BIOFILL, &s->ops_request); - set_bit(STRIPE_BIOFILL_RUN, &sh->state); - } - - pr_debug("locked=%d uptodate=%d to_read=%d" - " to_write=%d failed=%d failed_num=%d\n", - s->locked, s->uptodate, s->to_read, s->to_write, - s->failed, s->failed_num[0]); - /* check if the array has lost two devices and, if so, some requests might - * need to be failed - */ - if (s->failed > 1 && s->to_read+s->to_write+s->written) - handle_failed_stripe(conf, sh, s, disks, &s->return_bi); - if (s->failed > 1 && s->syncing) { - md_done_sync(conf->mddev, STRIPE_SECTORS,0); - clear_bit(STRIPE_SYNCING, &sh->state); - s->syncing = 0; - } - - /* might be able to return some write requests if the parity block - * is safe, or on a failed drive - */ - dev = &sh->dev[sh->pd_idx]; - if (s->written && - ((test_bit(R5_Insync, &dev->flags) && - !test_bit(R5_LOCKED, &dev->flags) && - test_bit(R5_UPTODATE, &dev->flags)) || - (s->failed == 1 && s->failed_num[0] == sh->pd_idx))) - handle_stripe_clean_event(conf, sh, disks, &s->return_bi); - - /* Now we might consider reading some blocks, either to check/generate - * parity, or to satisfy requests - * or to load a block that is being partially written. 
- */ - if (s->to_read || s->non_overwrite || - (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding) - handle_stripe_fill(sh, s, disks); - return 0; } @@ -3032,8 +2983,8 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks; - int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; - struct r5dev *dev, *pdev, *qdev; + struct r5dev *dev; + int i; /* Now to look around and see what can be done */ @@ -3107,65 +3058,6 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) spin_unlock_irq(&conf->device_lock); rcu_read_unlock(); - if (unlikely(s->blocked_rdev)) { - if (s->syncing || s->expanding || s->expanded || - s->to_write || s->written) { - set_bit(STRIPE_HANDLE, &sh->state); - return 1; - } - /* There is nothing for the blocked_rdev to block */ - rdev_dec_pending(s->blocked_rdev, conf->mddev); - s->blocked_rdev = NULL; - } - - if (s->to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { - set_bit(STRIPE_OP_BIOFILL, &s->ops_request); - set_bit(STRIPE_BIOFILL_RUN, &sh->state); - } - - pr_debug("locked=%d uptodate=%d to_read=%d" - " to_write=%d failed=%d failed_num=%d,%d\n", - s->locked, s->uptodate, s->to_read, s->to_write, s->failed, - s->failed_num[0], s->failed_num[1]); - /* check if the array has lost >2 devices and, if so, some requests - * might need to be failed - */ - if (s->failed > 2 && s->to_read+s->to_write+s->written) - handle_failed_stripe(conf, sh, s, disks, &s->return_bi); - if (s->failed > 2 && s->syncing) { - md_done_sync(conf->mddev, STRIPE_SECTORS,0); - clear_bit(STRIPE_SYNCING, &sh->state); - s->syncing = 0; - } - - /* - * might be able to return some write requests if the parity blocks - * are safe, or on a failed drive - */ - pdev = &sh->dev[pd_idx]; - s->p_failed = (s->failed >= 1 && s->failed_num[0] == pd_idx) - || (s->failed >= 2 && s->failed_num[1] == pd_idx); - qdev = &sh->dev[qd_idx]; - s->q_failed = (s->failed >= 1 && s->failed_num[0] == qd_idx) - || (s->failed >= 2 && s->failed_num[1] == qd_idx); - - if (s->written && - (s->p_failed || ((test_bit(R5_Insync, &pdev->flags) - && !test_bit(R5_LOCKED, &pdev->flags) - && test_bit(R5_UPTODATE, &pdev->flags)))) && - (s->q_failed || ((test_bit(R5_Insync, &qdev->flags) - && !test_bit(R5_LOCKED, &qdev->flags) - && test_bit(R5_UPTODATE, &qdev->flags))))) - handle_stripe_clean_event(conf, sh, disks, &s->return_bi); - - /* Now we might consider reading some blocks, either to check/generate - * parity, or to satisfy requests - * or to load a block that is being partially written. 
- */ - if (s->to_read || s->non_overwrite || (s->to_write && s->failed) || - (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding) - handle_stripe_fill(sh, s, disks); - return 0; } @@ -3173,10 +3065,11 @@ static void handle_stripe(struct stripe_head *sh) { struct stripe_head_state s; int done; + raid5_conf_t *conf = sh->raid_conf; int i; int prexor; int disks = sh->disks; - raid5_conf_t *conf = sh->raid_conf; + struct r5dev *pdev, *qdev; clear_bit(STRIPE_HANDLE, &sh->state); if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { @@ -3213,6 +3106,67 @@ static void handle_stripe(struct stripe_head *sh) if (done) goto finish; + if (unlikely(s.blocked_rdev)) { + if (s.syncing || s.expanding || s.expanded || + s.to_write || s.written) { + set_bit(STRIPE_HANDLE, &sh->state); + goto finish; + } + /* There is nothing for the blocked_rdev to block */ + rdev_dec_pending(s.blocked_rdev, conf->mddev); + s.blocked_rdev = NULL; + } + + if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { + set_bit(STRIPE_OP_BIOFILL, &s.ops_request); + set_bit(STRIPE_BIOFILL_RUN, &sh->state); + } + + pr_debug("locked=%d uptodate=%d to_read=%d" + " to_write=%d failed=%d failed_num=%d,%d\n", + s.locked, s.uptodate, s.to_read, s.to_write, s.failed, + s.failed_num[0], s.failed_num[1]); + /* check if the array has lost more than max_degraded devices and, + * if so, some requests might need to be failed. + */ + if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) + handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); + if (s.failed > conf->max_degraded && s.syncing) { + md_done_sync(conf->mddev, STRIPE_SECTORS, 0); + clear_bit(STRIPE_SYNCING, &sh->state); + s.syncing = 0; + } + + /* + * might be able to return some write requests if the parity blocks + * are safe, or on a failed drive + */ + pdev = &sh->dev[sh->pd_idx]; + s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) + || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); + qdev = &sh->dev[sh->qd_idx]; + s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) + || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) + || conf->level < 6; + + if (s.written && + (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) + && !test_bit(R5_LOCKED, &pdev->flags) + && test_bit(R5_UPTODATE, &pdev->flags)))) && + (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) + && !test_bit(R5_LOCKED, &qdev->flags) + && test_bit(R5_UPTODATE, &qdev->flags))))) + handle_stripe_clean_event(conf, sh, disks, &s.return_bi); + + /* Now we might consider reading some blocks, either to check/generate + * parity, or to satisfy requests + * or to load a block that is being partially written. + */ + if (s.to_read || s.non_overwrite + || (conf->level == 6 && s.to_write && s.failed) + || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) + handle_stripe_fill(sh, &s, disks); + /* Now we check to see if any write operations have recently * completed */ -- cgit v1.2.3 From acfe726bdd0000a9be1b308b29fad1e9ae62178c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: finalise new merged handle_stripe. handle_stripe5() and handle_stripe6() are now virtually identical. So discard one and rename the other to 'analyse_stripe()'. It always returns 0, so change it to 'void' and remove the 'done' variable in handle_stripe(). 
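After this change the top of handle_stripe() reduces to the following shape (a sketch; the state-bit bookkeeping is elided):

static void handle_stripe(struct stripe_head *sh)
{
        struct stripe_head_state s;

        /* ... STRIPE_ACTIVE / STRIPE_SYNCING bookkeeping ... */

        analyse_stripe(sh, &s); /* fills s; always succeeds, no 'done' */

        /* level-independent handling driven by s follows */
}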
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid5.c | 115 ++++++----------------------------------------------- 1 file changed, 13 insertions(+), 102 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 63acc51e8406..b321d6c36594 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2898,100 +2898,27 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh) * */ -static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) -{ - raid5_conf_t *conf = sh->raid_conf; - int disks = sh->disks, i; - struct r5dev *dev; - - /* Now to look around and see what can be done */ - rcu_read_lock(); - spin_lock_irq(&conf->device_lock); - for (i=disks; i--; ) { - mdk_rdev_t *rdev; - - dev = &sh->dev[i]; - - pr_debug("check %d: state 0x%lx toread %p read %p write %p " - "written %p\n", i, dev->flags, dev->toread, dev->read, - dev->towrite, dev->written); - - /* maybe we can request a biofill operation - * - * new wantfill requests are only permitted while - * ops_complete_biofill is guaranteed to be inactive - */ - if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && - !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) - set_bit(R5_Wantfill, &dev->flags); - - /* now count some things */ - if (test_bit(R5_LOCKED, &dev->flags)) - s->locked++; - if (test_bit(R5_UPTODATE, &dev->flags)) - s->uptodate++; - if (test_bit(R5_Wantcompute, &dev->flags)) - s->compute++; - - if (test_bit(R5_Wantfill, &dev->flags)) - s->to_fill++; - else if (dev->toread) - s->to_read++; - if (dev->towrite) { - s->to_write++; - if (!test_bit(R5_OVERWRITE, &dev->flags)) - s->non_overwrite++; - } - if (dev->written) - s->written++; - rdev = rcu_dereference(conf->disks[i].rdev); - if (s->blocked_rdev == NULL && - rdev && unlikely(test_bit(Blocked, &rdev->flags))) { - s->blocked_rdev = rdev; - atomic_inc(&rdev->nr_pending); - } - clear_bit(R5_Insync, &dev->flags); - if (!rdev) - /* Not in-sync */; - else if (test_bit(In_sync, &rdev->flags)) - set_bit(R5_Insync, &dev->flags); - else { - /* could be in-sync depending on recovery/reshape status */ - if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) - set_bit(R5_Insync, &dev->flags); - } - if (!test_bit(R5_Insync, &dev->flags)) { - /* The ReadError flag will just be confusing now */ - clear_bit(R5_ReadError, &dev->flags); - clear_bit(R5_ReWrite, &dev->flags); - } - if (test_bit(R5_ReadError, &dev->flags)) - clear_bit(R5_Insync, &dev->flags); - if (!test_bit(R5_Insync, &dev->flags)) { - if (s->failed < 2) - s->failed_num[s->failed] = i; - s->failed++; - } - } - spin_unlock_irq(&conf->device_lock); - rcu_read_unlock(); - - return 0; -} - -static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) +static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks; struct r5dev *dev; int i; - /* Now to look around and see what can be done */ + memset(s, 0, sizeof(*s)); + s->syncing = test_bit(STRIPE_SYNCING, &sh->state); + s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); + s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); + s->failed_num[0] = -1; + s->failed_num[1] = -1; + + /* Now to look around and see what can be done */ rcu_read_lock(); spin_lock_irq(&conf->device_lock); for (i=disks; i--; ) { mdk_rdev_t *rdev; + dev = &sh->dev[i]; pr_debug("check %d: state 0x%lx read %p write %p written %p\n", @@ -3015,9 +2942,9 @@ static int handle_stripe6(struct stripe_head *sh, 
struct stripe_head_state *s) BUG_ON(s->compute > 2); } - if (test_bit(R5_Wantfill, &dev->flags)) { + if (test_bit(R5_Wantfill, &dev->flags)) s->to_fill++; - } else if (dev->toread) + else if (dev->toread) s->to_read++; if (dev->towrite) { s->to_write++; @@ -3057,14 +2984,11 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) } spin_unlock_irq(&conf->device_lock); rcu_read_unlock(); - - return 0; } static void handle_stripe(struct stripe_head *sh) { struct stripe_head_state s; - int done; raid5_conf_t *conf = sh->raid_conf; int i; int prexor; @@ -3090,21 +3014,8 @@ static void handle_stripe(struct stripe_head *sh) (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, sh->check_state, sh->reconstruct_state); - memset(&s, 0, sizeof(s)); - - s.syncing = test_bit(STRIPE_SYNCING, &sh->state); - s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); - s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); - s.failed_num[0] = -1; - s.failed_num[1] = -1; - - if (conf->level == 6) - done = handle_stripe6(sh, &s); - else - done = handle_stripe5(sh, &s); - if (done) - goto finish; + analyse_stripe(sh, &s); if (unlikely(s.blocked_rdev)) { if (s.syncing || s.expanding || s.expanded || -- cgit v1.2.3 From a0a02a7ad62b5615b17294c2075e4916f8d1f0a4 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md: use proper little-endian bitops Using __test_and_{set,clear}_bit_le() with ignoring its return value can be replaced with __{set,clear}_bit_le(). Signed-off-by: Akinobu Mita Cc: NeilBrown Cc: linux-raid@vger.kernel.org Signed-off-by: NeilBrown --- drivers/md/bitmap.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 574b09afedd3..39ff0ef8b76e 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -932,7 +932,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) if (bitmap->flags & BITMAP_HOSTENDIAN) set_bit(bit, kaddr); else - __test_and_set_bit_le(bit, kaddr); + __set_bit_le(bit, kaddr); kunmap_atomic(kaddr, KM_USER0); PRINTK("set file bit %lu page %lu\n", bit, page->index); } @@ -1304,8 +1304,10 @@ void bitmap_daemon_work(mddev_t *mddev) clear_bit(file_page_offset(bitmap, j), paddr); else - __test_and_clear_bit_le(file_page_offset(bitmap, j), - paddr); + __clear_bit_le( + file_page_offset(bitmap, + j), + paddr); kunmap_atomic(paddr, KM_USER0); } else log->type->clear_region(log, j); -- cgit v1.2.3 From 8bda470e8ebde35f9349e98ecbce4dfb508a60fa Mon Sep 17 00:00:00 2001 From: Christian Dietrich Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid: use printk_ratelimited instead of printk_ratelimit As per printk_ratelimit comment, it should not be used. 
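For context on what printk_ratelimited() buys: printk_ratelimit() shares one global gate, so a noisy caller can silence unrelated messages, while printk_ratelimited() keeps ratelimit state per call site. A rough userspace analog of the per-callsite idea is sketched below; LOG_RATELIMITED is an invented name, the 5-second interval mirrors the kernel default, and the kernel's burst allowance is omitted for brevity:

#include <stdio.h>
#include <time.h>

/* Each macro expansion gets its own static state, one per call site. */
#define LOG_RATELIMITED(...)                                            \
        do {                                                            \
                static time_t last_;                                    \
                time_t now_ = time(NULL);                               \
                if (now_ - last_ >= 5) {        /* 5s, like the default */ \
                        last_ = now_;                                   \
                        fprintf(stderr, __VA_ARGS__);                   \
                }                                                       \
        } while (0)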
Signed-off-by: Christian Dietrich Signed-off-by: NeilBrown --- drivers/md/raid1.c | 25 +++++++++++++++---------- drivers/md/raid10.c | 23 +++++++++++++---------- drivers/md/raid5.c | 47 +++++++++++++++++++++++++---------------------- 3 files changed, 53 insertions(+), 42 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f7431b6d8447..d3a8f4bb4fc3 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "md.h" #include "raid1.h" #include "bitmap.h" @@ -287,10 +288,13 @@ static void raid1_end_read_request(struct bio *bio, int error) * oops, read error: */ char b[BDEVNAME_SIZE]; - if (printk_ratelimit()) - printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n", - mdname(conf->mddev), - bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector); + printk_ratelimited( + KERN_ERR "md/raid1:%s: %s: " + "rescheduling sector %llu\n", + mdname(conf->mddev), + bdevname(conf->mirrors[mirror].rdev->bdev, + b), + (unsigned long long)r1_bio->sector); reschedule_retry(r1_bio); } @@ -1580,12 +1584,13 @@ static void raid1d(mddev_t *mddev) GFP_NOIO, mddev); r1_bio->bios[r1_bio->read_disk] = bio; rdev = conf->mirrors[disk].rdev; - if (printk_ratelimit()) - printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to" - " other mirror: %s\n", - mdname(mddev), - (unsigned long long)r1_bio->sector, - bdevname(rdev->bdev,b)); + printk_ratelimited( + KERN_ERR + "md/raid1:%s: redirecting sector %llu" + " to other mirror: %s\n", + mdname(mddev), + (unsigned long long)r1_bio->sector, + bdevname(rdev->bdev, b)); bio->bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_end_io = raid1_end_read_request; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 3715e220e5e0..1725ec1e1e82 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "md.h" #include "raid10.h" #include "raid0.h" @@ -301,10 +302,11 @@ static void raid10_end_read_request(struct bio *bio, int error) * oops, read error - keep the refcount on the rdev */ char b[BDEVNAME_SIZE]; - if (printk_ratelimit()) - printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n", - mdname(conf->mddev), - bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector); + printk_ratelimited(KERN_ERR + "md/raid10:%s: %s: rescheduling sector %llu\n", + mdname(conf->mddev), + bdevname(conf->mirrors[dev].rdev->bdev, b), + (unsigned long long)r10_bio->sector); reschedule_retry(r10_bio); } } @@ -1669,12 +1671,13 @@ static void raid10d(mddev_t *mddev) bio_put(bio); slot = r10_bio->read_slot; rdev = conf->mirrors[mirror].rdev; - if (printk_ratelimit()) - printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to" - " another mirror\n", - mdname(mddev), - bdevname(rdev->bdev,b), - (unsigned long long)r10_bio->sector); + printk_ratelimited( + KERN_ERR + "md/raid10:%s: %s: redirecting" + "sector %llu to another mirror\n", + mdname(mddev), + bdevname(rdev->bdev, b), + (unsigned long long)r10_bio->sector); bio = bio_clone_mddev(r10_bio->master_bio, GFP_NOIO, mddev); r10_bio->devs[slot].bio = bio; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b321d6c36594..467e8e1cd3d2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -51,6 +51,7 @@ #include #include #include +#include #include "md.h" #include "raid5.h" #include "raid0.h" @@ -96,8 +97,6 @@ #define __inline__ #endif -#define printk_rl(args...) 
((void) (printk_ratelimit() && printk(args))) - /* * We maintain a biased count of active stripes in the bottom 16 bits of * bi_phys_segments, and a count of processed stripes in the upper 16 bits @@ -1583,12 +1582,14 @@ static void raid5_end_read_request(struct bio * bi, int error) set_bit(R5_UPTODATE, &sh->dev[i].flags); if (test_bit(R5_ReadError, &sh->dev[i].flags)) { rdev = conf->disks[i].rdev; - printk_rl(KERN_INFO "md/raid:%s: read error corrected" - " (%lu sectors at %llu on %s)\n", - mdname(conf->mddev), STRIPE_SECTORS, - (unsigned long long)(sh->sector - + rdev->data_offset), - bdevname(rdev->bdev, b)); + printk_ratelimited( + KERN_INFO + "md/raid:%s: read error corrected" + " (%lu sectors at %llu on %s)\n", + mdname(conf->mddev), STRIPE_SECTORS, + (unsigned long long)(sh->sector + + rdev->data_offset), + bdevname(rdev->bdev, b)); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); } @@ -1602,22 +1603,24 @@ static void raid5_end_read_request(struct bio * bi, int error) clear_bit(R5_UPTODATE, &sh->dev[i].flags); atomic_inc(&rdev->read_errors); if (conf->mddev->degraded >= conf->max_degraded) - printk_rl(KERN_WARNING - "md/raid:%s: read error not correctable " - "(sector %llu on %s).\n", - mdname(conf->mddev), - (unsigned long long)(sh->sector - + rdev->data_offset), - bdn); + printk_ratelimited( + KERN_WARNING + "md/raid:%s: read error not correctable " + "(sector %llu on %s).\n", + mdname(conf->mddev), + (unsigned long long)(sh->sector + + rdev->data_offset), + bdn); else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) /* Oh, no!!! */ - printk_rl(KERN_WARNING - "md/raid:%s: read error NOT corrected!! " - "(sector %llu on %s).\n", - mdname(conf->mddev), - (unsigned long long)(sh->sector - + rdev->data_offset), - bdn); + printk_ratelimited( + KERN_WARNING + "md/raid:%s: read error NOT corrected!! " + "(sector %llu on %s).\n", + mdname(conf->mddev), + (unsigned long long)(sh->sector + + rdev->data_offset), + bdn); else if (atomic_read(&rdev->read_errors) > conf->max_nr_stripes) printk(KERN_WARNING -- cgit v1.2.3 From 36fad858a7404a9656122a9e560a224ae2a00979 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md: introduce link/unlink_rdev() helpers There are places where sysfs links to rdev are handled in a same way. Add the helper functions to consolidate them. 
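One detail worth noting about the helpers this patch introduces (see the md.h hunk below): they keep the fixed char nm[20] buffer, which is safe because "rd%d" with a 32-bit int needs at most 14 bytes. A hypothetical standalone check, not kernel code:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
        char nm[20];
        /* "rd" (2) + "-2147483648" (11) + NUL (1) = 14 <= 20 */
        int n = snprintf(nm, sizeof(nm), "rd%d", INT_MIN);
        assert(n == 13 && (size_t)n < sizeof(nm));
        return 0;
}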
Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/md.c | 47 ++++++++++++++--------------------------------- drivers/md/md.h | 14 ++++++++++++++ drivers/md/raid1.c | 15 +++++---------- drivers/md/raid5.c | 10 +++------- 4 files changed, 36 insertions(+), 50 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 91e31e260b4a..0398dc42a956 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2459,7 +2459,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) { char *e; int err; - char nm[20]; int slot = simple_strtoul(buf, &e, 10); if (strncmp(buf, "none", 4)==0) slot = -1; @@ -2482,8 +2481,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) hot_remove_disk(rdev->mddev, rdev->raid_disk); if (err) return err; - sprintf(nm, "rd%d", rdev->raid_disk); - sysfs_remove_link(&rdev->mddev->kobj, nm); + sysfs_unlink_rdev(rdev->mddev, rdev); rdev->raid_disk = -1; set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); md_wakeup_thread(rdev->mddev->thread); @@ -2522,8 +2520,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) return err; } else sysfs_notify_dirent_safe(rdev->sysfs_state); - sprintf(nm, "rd%d", rdev->raid_disk); - if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) + if (sysfs_link_rdev(rdev->mddev, rdev)) /* failure here is OK */; /* don't wakeup anyone, leave that to userspace. */ } else { @@ -3149,15 +3146,13 @@ level_store(mddev_t *mddev, const char *buf, size_t len) } list_for_each_entry(rdev, &mddev->disks, same_set) { - char nm[20]; if (rdev->raid_disk < 0) continue; if (rdev->new_raid_disk >= mddev->raid_disks) rdev->new_raid_disk = -1; if (rdev->new_raid_disk == rdev->raid_disk) continue; - sprintf(nm, "rd%d", rdev->raid_disk); - sysfs_remove_link(&mddev->kobj, nm); + sysfs_unlink_rdev(mddev, rdev); } list_for_each_entry(rdev, &mddev->disks, same_set) { if (rdev->raid_disk < 0) @@ -3168,11 +3163,10 @@ level_store(mddev_t *mddev, const char *buf, size_t len) if (rdev->raid_disk < 0) clear_bit(In_sync, &rdev->flags); else { - char nm[20]; - sprintf(nm, "rd%d", rdev->raid_disk); - if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) - printk("md: cannot register %s for %s after level change\n", - nm, mdname(mddev)); + if (sysfs_link_rdev(mddev, rdev)) + printk(KERN_WARNING "md: cannot register rd%d" + " for %s after level change\n", + rdev->raid_disk, mdname(mddev)); } } @@ -4621,12 +4615,9 @@ int md_run(mddev_t *mddev) smp_wmb(); mddev->ready = 1; list_for_each_entry(rdev, &mddev->disks, same_set) - if (rdev->raid_disk >= 0) { - char nm[20]; - sprintf(nm, "rd%d", rdev->raid_disk); - if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) + if (rdev->raid_disk >= 0) + if (sysfs_link_rdev(mddev, rdev)) /* failure here is OK */; - } set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); @@ -4854,11 +4845,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) sysfs_notify_dirent_safe(mddev->sysfs_state); list_for_each_entry(rdev, &mddev->disks, same_set) - if (rdev->raid_disk >= 0) { - char nm[20]; - sprintf(nm, "rd%d", rdev->raid_disk); - sysfs_remove_link(&mddev->kobj, nm); - } + if (rdev->raid_disk >= 0) + sysfs_unlink_rdev(mddev, rdev); set_capacity(disk, 0); mutex_unlock(&mddev->open_mutex); @@ -7077,9 +7065,7 @@ static int remove_and_add_spares(mddev_t *mddev) atomic_read(&rdev->nr_pending)==0) { if (mddev->pers->hot_remove_disk( mddev, rdev->raid_disk)==0) { - char nm[20]; - sprintf(nm,"rd%d", rdev->raid_disk); - sysfs_remove_link(&mddev->kobj, nm); + sysfs_unlink_rdev(mddev, rdev); 
rdev->raid_disk = -1; } } @@ -7096,10 +7082,7 @@ static int remove_and_add_spares(mddev_t *mddev) rdev->recovery_offset = 0; if (mddev->pers-> hot_add_disk(mddev, rdev) == 0) { - char nm[20]; - sprintf(nm, "rd%d", rdev->raid_disk); - if (sysfs_create_link(&mddev->kobj, - &rdev->kobj, nm)) + if (sysfs_link_rdev(mddev, rdev)) /* failure here is OK */; spares++; md_new_event(mddev); @@ -7219,9 +7202,7 @@ void md_check_recovery(mddev_t *mddev) atomic_read(&rdev->nr_pending)==0) { if (mddev->pers->hot_remove_disk( mddev, rdev->raid_disk)==0) { - char nm[20]; - sprintf(nm,"rd%d", rdev->raid_disk); - sysfs_remove_link(&mddev->kobj, nm); + sysfs_unlink_rdev(mddev, rdev); rdev->raid_disk = -1; } } diff --git a/drivers/md/md.h b/drivers/md/md.h index 1c26c7a08ae6..6863f722cd2a 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -413,6 +413,20 @@ static inline char * mdname (mddev_t * mddev) return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; } +static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev) +{ + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); +} + +static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev) +{ + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); +} + /* * iterates through some rdev ringlist. It's safe to remove the * current 'rdev'. Dont touch 'tmp' though. diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d3a8f4bb4fc3..1d79a041db09 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2159,18 +2159,13 @@ static int raid1_reshape(mddev_t *mddev) for (d = d2 = 0; d < conf->raid_disks; d++) { mdk_rdev_t *rdev = conf->mirrors[d].rdev; if (rdev && rdev->raid_disk != d2) { - char nm[20]; - sprintf(nm, "rd%d", rdev->raid_disk); - sysfs_remove_link(&mddev->kobj, nm); + sysfs_unlink_rdev(mddev, rdev); rdev->raid_disk = d2; - sprintf(nm, "rd%d", rdev->raid_disk); - sysfs_remove_link(&mddev->kobj, nm); - if (sysfs_create_link(&mddev->kobj, - &rdev->kobj, nm)) + sysfs_unlink_rdev(mddev, rdev); + if (sysfs_link_rdev(mddev, rdev)) printk(KERN_WARNING - "md/raid1:%s: cannot register " - "%s\n", - mdname(mddev), nm); + "md/raid1:%s: cannot register rd%d\n", + mdname(mddev), rdev->raid_disk); } if (rdev) newmirrors[d2++].rdev = rdev; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 467e8e1cd3d2..0cd591472e1f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5152,16 +5152,14 @@ static int raid5_start_reshape(mddev_t *mddev) if (rdev->raid_disk < 0 && !test_bit(Faulty, &rdev->flags)) { if (raid5_add_disk(mddev, rdev) == 0) { - char nm[20]; if (rdev->raid_disk >= conf->previous_raid_disks) { set_bit(In_sync, &rdev->flags); added_devices++; } else rdev->recovery_offset = 0; - sprintf(nm, "rd%d", rdev->raid_disk); - if (sysfs_create_link(&mddev->kobj, - &rdev->kobj, nm)) + + if (sysfs_link_rdev(mddev, rdev)) /* Failure here is OK */; } } else if (rdev->raid_disk >= conf->previous_raid_disks @@ -5257,9 +5255,7 @@ static void raid5_finish_reshape(mddev_t *mddev) d++) { mdk_rdev_t *rdev = conf->disks[d].rdev; if (rdev && raid5_remove_disk(mddev, d) == 0) { - char nm[20]; - sprintf(nm, "rd%d", rdev->raid_disk); - sysfs_remove_link(&mddev->kobj, nm); + sysfs_unlink_rdev(mddev, rdev); rdev->raid_disk = -1; } } -- cgit v1.2.3 From a478a069b6adf186373a6648c54fc5f1d3c2f8eb Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md: remove ro check in md_check_recovery() Commit c89a8eee6154 ("Allow 
faulty devices to be removed from a readonly array.") added some handling for read-only arrays to this function, but that code could never run because the early 'ro' check returned first. Fix it. Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/md.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 0398dc42a956..77bd8d8708e4 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7163,9 +7163,6 @@ void md_check_recovery(mddev_t *mddev) if (mddev->bitmap) bitmap_daemon_work(mddev); - if (mddev->ro) - return; - if (signal_pending(current)) { if (mddev->pers->sync_request && !mddev->external) { printk(KERN_INFO "md: %s in immediate safe mode\n", -- cgit v1.2.3 From 5389042ffa36976caa45a79af16081d759001fa7 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md: change management of recovery_disabled. If we hit a read error while recovering a mirror, we want to abort the recovery without necessarily failing the disk - having a disk with a read error is better than not having an array at all. Currently this is managed with a per-array flag "recovery_disabled" and is only implemented for RAID1. For RAID10 we will need finer-grained control, as we might want to disable recovery for individual devices separately. So push more of the decision making into the personality. 'recovery_disabled' is now a 'cookie' which is copied when the personality wants to disable recovery, and is changed when a device is added to the array, as that is used as a trigger to 'try recovery again'. This will allow RAID10 to get the control that it needs. Signed-off-by: NeilBrown --- drivers/md/md.c | 4 ++-- drivers/md/md.h | 9 ++++++--- drivers/md/raid1.c | 7 +++++-- drivers/md/raid1.h | 6 ++++++ 4 files changed, 19 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 77bd8d8708e4..c7d9c6af4634 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1922,7 +1922,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) bd_link_disk_holder(rdev->bdev, mddev->gendisk); /* May as well allow recovery to be retried once */ - mddev->recovery_disabled = 0; + mddev->recovery_disabled++; return 0; @@ -7070,7 +7070,7 @@ static int remove_and_add_spares(mddev_t *mddev) } } - if (mddev->degraded && !mddev->recovery_disabled) { + if (mddev->degraded) { list_for_each_entry(rdev, &mddev->disks, same_set) { if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && diff --git a/drivers/md/md.h b/drivers/md/md.h index 6863f722cd2a..de5455d30d41 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -239,9 +239,12 @@ struct mddev_s #define MD_RECOVERY_FROZEN 9 unsigned long recovery; - int recovery_disabled; /* if we detect that recovery - * will always fail, set this - * so we don't loop trying */ + /* If a RAID personality determines that recovery (of a particular + * device) will fail due to a read error on the source device, it + * takes a copy of this number and does not attempt recovery again + * until this number changes.
+ */ + int recovery_disabled; int in_sync; /* know to not need resync */ /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1d79a041db09..44069b38d6dd 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -956,7 +956,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * However don't try a recovery from this drive as * it is very likely to fail. */ - mddev->recovery_disabled = 1; + conf->recovery_disabled = mddev->recovery_disabled; return; } if (test_and_clear_bit(In_sync, &rdev->flags)) { @@ -1052,6 +1052,9 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) int first = 0; int last = mddev->raid_disks - 1; + if (mddev->recovery_disabled == conf->recovery_disabled) + return -EBUSY; + if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; @@ -1107,7 +1110,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) * is not possible. */ if (!test_bit(Faulty, &rdev->flags) && - !mddev->recovery_disabled && + mddev->recovery_disabled != conf->recovery_disabled && mddev->degraded < conf->raid_disks) { err = -EBUSY; goto abort; diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index e743a64fac4f..3cd18cfda2ad 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -48,6 +48,12 @@ struct r1_private_data_s { * (fresh device added). * Cleared when a sync completes. */ + int recovery_disabled; /* when the same as + * mddev->recovery_disabled + * we don't allow recovery + * to be attempted as we + * expect a read error + */ wait_queue_head_t wait_barrier; -- cgit v1.2.3 From 2bb77736ae5dca0a189829fbb7379d43364a9dac Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid10: Make use of new recovery_disabled handling When we get a read error during recovery, RAID10 previously arranged for the recovering device to appear to fail so that the recovery stops and doesn't restart. This is misleading and wrong. Instead, make use of the new recovery_disabled handling and mark the target device and having recovery disabled. Add appropriate checks in add_disk and remove_disk so that devices are removed and not re-added when recovery is disabled. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 62 +++++++++++++++++++++++++++++++---------------------- drivers/md/raid10.h | 5 +++++ 2 files changed, 41 insertions(+), 26 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1725ec1e1e82..5583201e5cde 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1099,7 +1099,6 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) conf_t *conf = mddev->private; int err = -EEXIST; int mirror; - mirror_info_t *p; int first = 0; int last = conf->raid_disks - 1; @@ -1119,32 +1118,36 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) mirror = rdev->saved_raid_disk; else mirror = first; - for ( ; mirror <= last ; mirror++) - if ( !(p=conf->mirrors+mirror)->rdev) { - - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must - * never risk violating it, so limit - * ->max_segments to one lying with a single - * page, as a one page request is never in - * violation. 
- */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { - blk_queue_max_segments(mddev->queue, 1); - blk_queue_segment_boundary(mddev->queue, - PAGE_CACHE_SIZE - 1); - } + for ( ; mirror <= last ; mirror++) { + mirror_info_t *p = &conf->mirrors[mirror]; + if (p->recovery_disabled == mddev->recovery_disabled) + continue; + if (!p->rdev) + continue; - p->head_position = 0; - rdev->raid_disk = mirror; - err = 0; - if (rdev->saved_raid_disk != mirror) - conf->fullsync = 1; - rcu_assign_pointer(p->rdev, rdev); - break; + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); + /* as we don't honour merge_bvec_fn, we must + * never risk violating it, so limit + * ->max_segments to one lying with a single + * page, as a one page request is never in + * violation. + */ + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); } + p->head_position = 0; + rdev->raid_disk = mirror; + err = 0; + if (rdev->saved_raid_disk != mirror) + conf->fullsync = 1; + rcu_assign_pointer(p->rdev, rdev); + break; + } + md_integrity_add_rdev(rdev, mddev); print_conf(conf); return err; @@ -1169,6 +1172,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number) * is not possible. */ if (!test_bit(Faulty, &rdev->flags) && + mddev->recovery_disabled != p->recovery_disabled && enough(conf)) { err = -EBUSY; goto abort; @@ -1383,8 +1387,14 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); if (test_bit(R10BIO_Uptodate, &r10_bio->state)) generic_make_request(wbio); - else - bio_endio(wbio, -EIO); + else { + printk(KERN_NOTICE + "md/raid10:%s: recovery aborted due to read error\n", + mdname(mddev)); + conf->mirrors[d].recovery_disabled = mddev->recovery_disabled; + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + bio_endio(wbio, 0); + } } diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 944b1104d3b4..a485914c48c1 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -6,6 +6,11 @@ typedef struct mirror_info mirror_info_t; struct mirror_info { mdk_rdev_t *rdev; sector_t head_position; + int recovery_disabled; /* matches + * mddev->recovery_disabled + * when we shouldn't try + * recovering this device. + */ }; typedef struct r10bio_s r10bio_t; -- cgit v1.2.3 From 700c72138938cf428c74379806886c6b017d6295 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid10: Improve decision on whether to fail a device with a read error. Normally we would fail a device with a READ error. However if doing so causes the array to fail, it is better to leave the device in place and just return the read error to the caller. The current test for decide if the array will fail is overly simplistic. We have a function 'enough' which can tell if the array is failed or not, so use it to guide the decision. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 57 ++++++++++++++++++++++++++--------------------------- 1 file changed, 28 insertions(+), 29 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5583201e5cde..6721cb080354 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -970,6 +970,30 @@ static void status(struct seq_file *seq, mddev_t *mddev) seq_printf(seq, "]"); } +/* check if there are enough drives for + * every block to appear on atleast one. 
+ * Don't consider the device numbered 'ignore' + * as we might be about to remove it. + */ +static int enough(conf_t *conf, int ignore) +{ + int first = 0; + + do { + int n = conf->copies; + int cnt = 0; + while (n--) { + if (conf->mirrors[first].rdev && + first != ignore) + cnt++; + first = (first+1) % conf->raid_disks; + } + if (cnt == 0) + return 0; + } while (first != 0); + return 1; +} + static void error(mddev_t *mddev, mdk_rdev_t *rdev) { char b[BDEVNAME_SIZE]; @@ -982,13 +1006,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * else mark the drive as failed */ if (test_bit(In_sync, &rdev->flags) - && conf->raid_disks-mddev->degraded == 1) + && !enough(conf, rdev->raid_disk)) /* * Don't fail the drive, just return an IO error. - * The test should really be more sophisticated than - * "working_disks == 1", but it isn't critical, and - * can wait until we do more sophisticated "is the drive - * really dead" tests... */ return; if (test_and_clear_bit(In_sync, &rdev->flags)) { @@ -1043,27 +1063,6 @@ static void close_sync(conf_t *conf) conf->r10buf_pool = NULL; } -/* check if there are enough drives for - * every block to appear on atleast one - */ -static int enough(conf_t *conf) -{ - int first = 0; - - do { - int n = conf->copies; - int cnt = 0; - while (n--) { - if (conf->mirrors[first].rdev) - cnt++; - first = (first+1) % conf->raid_disks; - } - if (cnt == 0) - return 0; - } while (first != 0); - return 1; -} - static int raid10_spare_active(mddev_t *mddev) { int i; @@ -1107,7 +1106,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) * very different from resync */ return -EBUSY; - if (!enough(conf)) + if (!enough(conf, -1)) return -EINVAL; if (rdev->raid_disk >= 0) @@ -1173,7 +1172,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number) */ if (!test_bit(Faulty, &rdev->flags) && mddev->recovery_disabled != p->recovery_disabled && - enough(conf)) { + enough(conf, -1)) { err = -EBUSY; goto abort; } @@ -2286,7 +2285,7 @@ static int run(mddev_t *mddev) disk->head_position = 0; } /* need to check that every block has at least one working mirror */ - if (!enough(conf)) { + if (!enough(conf, -1)) { printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", mdname(mddev)); goto out_free_conf; -- cgit v1.2.3 From 65a06f06744d3deae61f3596566952a8da92199f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md: get rid of unnecessary casts on page_address() page_address() returns void pointer, so the casts can be removed. 
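The rule being relied on here is basic C: a void * converts implicitly to and from any object pointer type, so the casts are pure noise (they would only be required in C++). A minimal illustration with stub types, not the kernel's:

#include <stdlib.h>

struct superblock { unsigned int magic; };

static void *page_address_stub(void)   /* stands in for page_address() */
{
        return calloc(1, sizeof(struct superblock));
}

int main(void)
{
        struct superblock *sb = page_address_stub();    /* no cast needed */
        sb->magic = 0xa92b4efc;         /* MD_SB_MAGIC, for flavour */
        free(sb);
        return 0;
}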
Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/md.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index c7d9c6af4634..2a3205053b2a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1025,7 +1025,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version ret = -EINVAL; bdevname(rdev->bdev, b); - sb = (mdp_super_t*)page_address(rdev->sb_page); + sb = page_address(rdev->sb_page); if (sb->md_magic != MD_SB_MAGIC) { printk(KERN_ERR "md: invalid raid superblock magic on %s\n", @@ -1064,7 +1064,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version ret = 1; } else { __u64 ev1, ev2; - mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); + mdp_super_t *refsb = page_address(refdev->sb_page); if (!uuid_equal(refsb, sb)) { printk(KERN_WARNING "md: %s has different UUID to %s\n", b, bdevname(refdev->bdev,b2)); @@ -1099,7 +1099,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) { mdp_disk_t *desc; - mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); + mdp_super_t *sb = page_address(rdev->sb_page); __u64 ev1 = md_event(sb); rdev->raid_disk = -1; @@ -1230,7 +1230,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) rdev->sb_size = MD_SB_BYTES; - sb = (mdp_super_t*)page_address(rdev->sb_page); + sb = page_address(rdev->sb_page); memset(sb, 0, sizeof(*sb)); @@ -1435,7 +1435,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) if (ret) return ret; - sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); + sb = page_address(rdev->sb_page); if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || sb->major_version != cpu_to_le32(1) || @@ -1477,8 +1477,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) ret = 1; } else { __u64 ev1, ev2; - struct mdp_superblock_1 *refsb = - (struct mdp_superblock_1*)page_address(refdev->sb_page); + struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || sb->level != refsb->level || @@ -1513,7 +1512,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) { - struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); + struct mdp_superblock_1 *sb = page_address(rdev->sb_page); __u64 ev1 = le64_to_cpu(sb->events); rdev->raid_disk = -1; @@ -1619,7 +1618,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) int max_dev, i; /* make rdev->sb match mddev and rdev data. 
*/ - sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); + sb = page_address(rdev->sb_page); sb->feature_map = 0; sb->pad0 = 0; @@ -1724,7 +1723,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) num_sectors = max_sectors; rdev->sb_start = sb_start; } - sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page); + sb = page_address(rdev->sb_page); sb->data_size = cpu_to_le64(num_sectors); sb->super_offset = rdev->sb_start; sb->sb_csum = calc_sb_1_csum(sb); @@ -2127,10 +2126,10 @@ static void print_rdev(mdk_rdev_t *rdev, int major_version) printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); switch (major_version) { case 0: - print_sb_90((mdp_super_t*)page_address(rdev->sb_page)); + print_sb_90(page_address(rdev->sb_page)); break; case 1: - print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page)); + print_sb_1(page_address(rdev->sb_page)); break; } } else -- cgit v1.2.3 From 9d3d80113df824a266c5db3fac357a036ebc0b62 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid1: move rdev->corrected_errors counting Read errors are considered to corrected if write-back and re-read cycle is finished without further problems. Thus moving the rdev-> corrected_errors counting after the re-reading looks more reasonable IMHO. Also included a couple of whitespace fixes on sync_page_io(). Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/raid1.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 44069b38d6dd..a7e69081187f 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1224,9 +1224,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio) * active, and resync is currently active */ rdev = conf->mirrors[d].rdev; - if (sync_page_io(rdev, - sect, - s<<9, + if (sync_page_io(rdev, sect, s<<9, bio->bi_io_vec[idx].bv_page, READ, false)) { success = 1; @@ -1261,16 +1259,13 @@ static int fix_sync_read_error(r1bio_t *r1_bio) if (r1_bio->bios[d]->bi_end_io != end_sync_read) continue; rdev = conf->mirrors[d].rdev; - if (sync_page_io(rdev, - sect, - s<<9, + if (sync_page_io(rdev, sect, s<<9, bio->bi_io_vec[idx].bv_page, WRITE, false) == 0) { r1_bio->bios[d]->bi_end_io = NULL; rdev_dec_pending(rdev, mddev); md_error(mddev, rdev); - } else - atomic_add(s, &rdev->corrected_errors); + } } d = start; while (d != r1_bio->read_disk) { @@ -1280,12 +1275,12 @@ static int fix_sync_read_error(r1bio_t *r1_bio) if (r1_bio->bios[d]->bi_end_io != end_sync_read) continue; rdev = conf->mirrors[d].rdev; - if (sync_page_io(rdev, - sect, - s<<9, + if (sync_page_io(rdev, sect, s<<9, bio->bi_io_vec[idx].bv_page, READ, false) == 0) md_error(mddev, rdev); + else + atomic_add(s, &rdev->corrected_errors); } sectors -= s; sect += s; -- cgit v1.2.3 From ddd5115fe5594f5aae3c7f0008a5327bb1d19397 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: move rdev->corrected_errors counting Read errors are considered to corrected if write-back and re-read cycle is finished without further problems. Thus moving the rdev-> corrected_errors counting after the re-reading looks more reasonable IMHO. 
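The ordering this patch establishes, reduced to a sketch; write_back, re_read, fail_device and count_corrected are illustrative stubs invented here, not kernel functions:

typedef unsigned long long sector_t;
struct dev;
extern int write_back(struct dev *d, sector_t sect, int s);
extern int re_read(struct dev *d, sector_t sect, int s);
extern void fail_device(struct dev *d);
extern void count_corrected(struct dev *d, int s);

/* Only count sectors as corrected once the write-back AND the
 * verifying re-read both succeed; either failure errors the device. */
static void repair_range(struct dev *d, sector_t sect, int s)
{
        if (!write_back(d, sect, s))
                fail_device(d);
        else if (!re_read(d, sect, s))
                fail_device(d);
        else
                count_corrected(d, s);
}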
Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/raid5.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0cd591472e1f..a81eca6434dd 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -547,10 +547,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_io_vec[0].bv_offset = 0; bi->bi_size = STRIPE_SIZE; bi->bi_next = NULL; - if ((rw & WRITE) && - test_bit(R5_ReWrite, &sh->dev[i].flags)) - atomic_add(STRIPE_SECTORS, - &rdev->corrected_errors); generic_make_request(bi); } else { if (rw & WRITE) @@ -1590,6 +1586,7 @@ static void raid5_end_read_request(struct bio * bi, int error) (unsigned long long)(sh->sector + rdev->data_offset), bdevname(rdev->bdev, b)); + atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); } -- cgit v1.2.3 From cbea21703b2484f83faef040ed1de30114794392 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid10: move rdev->corrected_errors counting Read errors are considered to corrected if write-back and re-read cycle is finished without further problems. Thus moving the rdev-> corrected_errors counting after the re-reading looks more reasonable IMHO. Signed-off-by: Namhyung Kim Signed-off-by: NeilBrown --- drivers/md/raid10.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 6721cb080354..5def27c28be7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1533,7 +1533,6 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) test_bit(In_sync, &rdev->flags)) { atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - atomic_add(s, &rdev->corrected_errors); if (sync_page_io(rdev, r10_bio->devs[sl].addr + sect, @@ -1598,6 +1597,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) (unsigned long long)( sect + rdev->data_offset), bdevname(rdev->bdev, b)); + atomic_add(s, &rdev->corrected_errors); } rdev_dec_pending(rdev, mddev); -- cgit v1.2.3 From 8cfa7b0f67b4d899efc7f39eb7e172fd79237811 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: md/raid5: Avoid BUG caused by multiple failures. While preparing to write a stripe we keep the parity block or blocks locked (R5_LOCKED) - towards the end of schedule_reconstruction. If the array is discovered to have failed before this write completes we can leave those blocks LOCKED, and init_stripe will notice that a free stripe still has a locked block and will complain. So clear the R5_LOCKED flag in handle_failed_stripe, and demote the 'BUG' to a 'WARN_ON'. 
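On the BUG-to-WARN_ON demotion: BUG() halts the kernel, while WARN_ON() logs a stack trace and continues, which suits a now-recoverable inconsistency such as a stale R5_LOCKED bit. A loose userspace analog of the semantics, using a GNU C statement expression and intended only as illustration:

#include <stdio.h>

#define WARN_ON(cond)                                                   \
        ({                                                              \
                int c_ = !!(cond);                                      \
                if (c_)                                                 \
                        fprintf(stderr, "WARNING at %s:%d\n",           \
                                __FILE__, __LINE__);                    \
                c_;     /* report, but keep running */                  \
        })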
Signed-off-by: NeilBrown --- drivers/md/raid5.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a81eca6434dd..b874f42694e2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -340,7 +340,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) (unsigned long long)sh->sector, i, dev->toread, dev->read, dev->towrite, dev->written, test_bit(R5_LOCKED, &dev->flags)); - BUG(); + WARN_ON(1); } dev->flags = 0; raid5_build_block(sh, i, previous); @@ -2301,6 +2301,10 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, if (bitmap_end) bitmap_endwrite(conf->mddev->bitmap, sh->sector, STRIPE_SECTORS, 0, 0); + /* If we were in the middle of a write the parity block might + * still be locked - so just clear all R5_LOCKED flags + */ + clear_bit(R5_LOCKED, &sh->dev[i].flags); } if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) -- cgit v1.2.3 From 654e8b5abc0a793f0c029128db6e4804691a383e Mon Sep 17 00:00:00 2001 From: Jonathan Brassow Date: Wed, 27 Jul 2011 11:00:36 +1000 Subject: MD: raid1 s/sysfs_notify_dirent/sysfs_notify_dirent_safe If device-mapper creates a RAID1 array that includes devices to be rebuilt, it will deref a NULL pointer when finished because sysfs is not used by device-mapper instantiated RAID devices. Signed-off-by: Jonathan Brassow Signed-off-by: NeilBrown --- drivers/md/raid1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a7e69081187f..3cbf0ac2aaad 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1031,7 +1031,7 @@ static int raid1_spare_active(mddev_t *mddev) && !test_bit(Faulty, &rdev->flags) && !test_and_set_bit(In_sync, &rdev->flags)) { count++; - sysfs_notify_dirent(rdev->sysfs_state); + sysfs_notify_dirent_safe(rdev->sysfs_state); } } spin_lock_irqsave(&conf->device_lock, flags); -- cgit v1.2.3 From 3520fa4db7fc4ae1b0373dcecdaf720f620dab2d Mon Sep 17 00:00:00 2001 From: Jonathan Brassow Date: Wed, 27 Jul 2011 11:00:37 +1000 Subject: MD bitmap: Revert DM dirty log hooks Revert most of commit e384e58549a2e9a83071ad80280c1a9053cfd84c md/bitmap: prepare for storing write-intent-bitmap via dm-dirty-log. MD should not need to use DM's dirty log - we decided to use md's bitmaps instead. Keeping the DIV_ROUND_UP clean-ups that were part of commit e384e58549a2e9a83071ad80280c1a9053cfd84c, however. 
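For reference, the DIV_ROUND_UP clean-ups being kept are the standard kernel idiom for ceiling division (as in include/linux/kernel.h), used when sizing page and bit arrays:

/* e.g. DIV_ROUND_UP(4097, 4096) == 2 */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))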
Signed-off-by: Jonathan Brassow Signed-off-by: NeilBrown --- drivers/md/bitmap.c | 133 +++++++++++++++++----------------------------------- drivers/md/bitmap.h | 5 -- drivers/md/md.h | 5 -- 3 files changed, 43 insertions(+), 100 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 39ff0ef8b76e..0dc6546b77a8 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -29,7 +29,6 @@ #include "md.h" #include "bitmap.h" -#include /* debug macros */ #define DEBUG 0 @@ -775,10 +774,8 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon * 0 or page 1 */ static inline struct page *filemap_get_page(struct bitmap *bitmap, - unsigned long chunk) + unsigned long chunk) { - if (bitmap->filemap == NULL) - return NULL; if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL; return bitmap->filemap[file_page_index(bitmap, chunk) @@ -878,28 +875,19 @@ enum bitmap_page_attr { static inline void set_page_attr(struct bitmap *bitmap, struct page *page, enum bitmap_page_attr attr) { - if (page) - __set_bit((page->index<<2) + attr, bitmap->filemap_attr); - else - __set_bit(attr, &bitmap->logattrs); + __set_bit((page->index<<2) + attr, bitmap->filemap_attr); } static inline void clear_page_attr(struct bitmap *bitmap, struct page *page, enum bitmap_page_attr attr) { - if (page) - __clear_bit((page->index<<2) + attr, bitmap->filemap_attr); - else - __clear_bit(attr, &bitmap->logattrs); + __clear_bit((page->index<<2) + attr, bitmap->filemap_attr); } static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page, enum bitmap_page_attr attr) { - if (page) - return test_bit((page->index<<2) + attr, bitmap->filemap_attr); - else - return test_bit(attr, &bitmap->logattrs); + return test_bit((page->index<<2) + attr, bitmap->filemap_attr); } /* @@ -912,30 +900,26 @@ static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *p static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) { unsigned long bit; - struct page *page = NULL; + struct page *page; void *kaddr; unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap); - if (!bitmap->filemap) { - struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log; - if (log) - log->type->mark_region(log, chunk); - } else { + if (!bitmap->filemap) + return; - page = filemap_get_page(bitmap, chunk); - if (!page) - return; - bit = file_page_offset(bitmap, chunk); + page = filemap_get_page(bitmap, chunk); + if (!page) + return; + bit = file_page_offset(bitmap, chunk); - /* set the bit */ - kaddr = kmap_atomic(page, KM_USER0); - if (bitmap->flags & BITMAP_HOSTENDIAN) - set_bit(bit, kaddr); - else - __set_bit_le(bit, kaddr); - kunmap_atomic(kaddr, KM_USER0); - PRINTK("set file bit %lu page %lu\n", bit, page->index); - } + /* set the bit */ + kaddr = kmap_atomic(page, KM_USER0); + if (bitmap->flags & BITMAP_HOSTENDIAN) + set_bit(bit, kaddr); + else + __set_bit_le(bit, kaddr); + kunmap_atomic(kaddr, KM_USER0); + PRINTK("set file bit %lu page %lu\n", bit, page->index); /* record page number so it gets flushed to disk when unplug occurs */ set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); } @@ -952,16 +936,6 @@ void bitmap_unplug(struct bitmap *bitmap) if (!bitmap) return; - if (!bitmap->filemap) { - /* Must be using a dirty_log */ - struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log; - dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs); - need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs); - if (dirty 
|| need_write) - if (log->type->flush(log)) - bitmap->flags |= BITMAP_WRITE_ERROR; - goto out; - } /* look at each page to see if there are any set bits that need to be * flushed out to disk */ @@ -990,7 +964,6 @@ void bitmap_unplug(struct bitmap *bitmap) else md_super_wait(bitmap->mddev); } -out: if (bitmap->flags & BITMAP_WRITE_ERROR) bitmap_file_kick(bitmap); } @@ -1199,7 +1172,6 @@ void bitmap_daemon_work(mddev_t *mddev) struct page *page = NULL, *lastpage = NULL; sector_t blocks; void *paddr; - struct dm_dirty_log *log = mddev->bitmap_info.log; /* Use a mutex to guard daemon_work against * bitmap_destroy. @@ -1224,12 +1196,11 @@ void bitmap_daemon_work(mddev_t *mddev) spin_lock_irqsave(&bitmap->lock, flags); for (j = 0; j < bitmap->chunks; j++) { bitmap_counter_t *bmc; - if (!bitmap->filemap) { - if (!log) - /* error or shutdown */ - break; - } else - page = filemap_get_page(bitmap, j); + if (!bitmap->filemap) + /* error or shutdown */ + break; + + page = filemap_get_page(bitmap, j); if (page != lastpage) { /* skip this page unless it's marked as needing cleaning */ @@ -1298,19 +1269,16 @@ void bitmap_daemon_work(mddev_t *mddev) -1); /* clear the bit */ - if (page) { - paddr = kmap_atomic(page, KM_USER0); - if (bitmap->flags & BITMAP_HOSTENDIAN) - clear_bit(file_page_offset(bitmap, j), - paddr); - else - __clear_bit_le( + paddr = kmap_atomic(page, KM_USER0); + if (bitmap->flags & BITMAP_HOSTENDIAN) + clear_bit(file_page_offset(bitmap, j), + paddr); + else + __clear_bit_le( file_page_offset(bitmap, j), paddr); - kunmap_atomic(paddr, KM_USER0); - } else - log->type->clear_region(log, j); + kunmap_atomic(paddr, KM_USER0); } } else j |= PAGE_COUNTER_MASK; @@ -1318,16 +1286,12 @@ void bitmap_daemon_work(mddev_t *mddev) spin_unlock_irqrestore(&bitmap->lock, flags); /* now sync the final page */ - if (lastpage != NULL || log != NULL) { + if (lastpage != NULL) { spin_lock_irqsave(&bitmap->lock, flags); if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); - if (lastpage) - write_page(bitmap, lastpage, 0); - else - if (log->type->flush(log)) - bitmap->flags |= BITMAP_WRITE_ERROR; + write_page(bitmap, lastpage, 0); } else { set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); @@ -1769,12 +1733,10 @@ int bitmap_create(mddev_t *mddev) BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); if (!file - && !mddev->bitmap_info.offset - && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */ + && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */ return 0; BUG_ON(file && mddev->bitmap_info.offset); - BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log); bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) @@ -1865,6 +1827,7 @@ int bitmap_create(mddev_t *mddev) int bitmap_load(mddev_t *mddev) { int err = 0; + sector_t start = 0; sector_t sector = 0; struct bitmap *bitmap = mddev->bitmap; @@ -1883,24 +1846,14 @@ int bitmap_load(mddev_t *mddev) } bitmap_close_sync(bitmap); - if (mddev->bitmap_info.log) { - unsigned long i; - struct dm_dirty_log *log = mddev->bitmap_info.log; - for (i = 0; i < bitmap->chunks; i++) - if (!log->type->in_sync(log, i, 1)) - bitmap_set_memory_bits(bitmap, - (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap), - 1); - } else { - sector_t start = 0; - if (mddev->degraded == 0 - || bitmap->events_cleared == mddev->events) - /* no need to keep dirty bits to optimise a - * re-add of a missing device */ - 
start = mddev->recovery_cp; - - err = bitmap_init_from_disk(bitmap, start); - } + if (mddev->degraded == 0 + || bitmap->events_cleared == mddev->events) + /* no need to keep dirty bits to optimise a + * re-add of a missing device */ + start = mddev->recovery_cp; + + err = bitmap_init_from_disk(bitmap, start); + if (err) goto out; diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h index b2a127e891ac..a28f2e5588c6 100644 --- a/drivers/md/bitmap.h +++ b/drivers/md/bitmap.h @@ -212,10 +212,6 @@ struct bitmap { unsigned long file_pages; /* number of pages in the file */ int last_page_size; /* bytes in the last page */ - unsigned long logattrs; /* used when filemap_attr doesn't exist - * because we are working with a dirty_log - */ - unsigned long flags; int allclean; @@ -237,7 +233,6 @@ struct bitmap { wait_queue_head_t behind_wait; struct sysfs_dirent *sysfs_can_clear; - }; /* the bitmap API */ diff --git a/drivers/md/md.h b/drivers/md/md.h index de5455d30d41..7d906a96477a 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -307,11 +307,6 @@ struct mddev_s * hot-adding a bitmap. It should * eventually be settable by sysfs. */ - /* When md is serving under dm, it might use a - * dirty_log to store the bits. - */ - struct dm_dirty_log *log; - struct mutex mutex; unsigned long chunksize; unsigned long daemon_sleep; /* how many jiffies between updates? */ -- cgit v1.2.3 From 768e587e18c1c4ce0403b9532115752189422408 Mon Sep 17 00:00:00 2001 From: Jonathan Brassow Date: Wed, 27 Jul 2011 11:00:37 +1000 Subject: MD: generate an event when array sync is complete This patch causes MD to generate an event (for device-mapper) when the synchronization thread is reaped. This is expected behavior for device-mapper. Signed-off-by: Jonathan Brassow Signed-off-by: NeilBrown --- drivers/md/md.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 2a3205053b2a..7caa096d02f6 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7130,6 +7130,8 @@ static void reap_sync_thread(mddev_t *mddev) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); sysfs_notify_dirent_safe(mddev->sysfs_action); md_new_event(mddev); + if (mddev->event_work.func) + queue_work(md_misc_wq, &mddev->event_work); } /* -- cgit v1.2.3 From a519b26dbe6533416d21b552053b0bf687f878d7 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 07:56:24 +1000 Subject: md: remove suspicious size_of() When calling bioset_create we pass the size of the front_pad as sizeof(mddev) which looks suspicious as mddev is a pointer and so it looks like a common mistake where sizeof(*mddev) was intended. 
The size is actually correct as we want to store a pointer in the front padding of the bios created by the bioset, so make the intent more explicit by using sizeof(mddev_t *) Reported-by: Zdenek Kabelac Signed-off-by: NeilBrown --- drivers/md/md.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 7caa096d02f6..4279b3b58d1a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4497,7 +4497,8 @@ int md_run(mddev_t *mddev) } if (mddev->bio_set == NULL) - mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev)); + mddev->bio_set = bioset_create(BIO_POOL_SIZE, + sizeof(mddev_t *)); spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); -- cgit v1.2.3 From 2230dfe4ccc3add340dc6d437965b2de1d269fde Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:31:46 +1000 Subject: md: beginnings of bad block management. This the first step in allowing md to track bad-blocks per-device so that we can fail individual blocks rather than the whole device. This patch just adds a data structure for recording bad blocks, with routines to add, remove, search the list. Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/md.c | 415 +++++++++++++++++++++++++++++++++++++++++++++++++++++++- drivers/md/md.h | 48 ++++++- 2 files changed, 459 insertions(+), 4 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 4279b3b58d1a..463a392c0705 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1952,6 +1952,9 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev) sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); rdev->sysfs_state = NULL; + kfree(rdev->badblocks.page); + rdev->badblocks.count = 0; + rdev->badblocks.page = NULL; /* We need to delay this, otherwise we can deadlock when * writing to 'remove' to "dev/state". We also need * to delay it due to rcu usage. @@ -2778,7 +2781,7 @@ static struct kobj_type rdev_ktype = { .default_attrs = rdev_default_attrs, }; -void md_rdev_init(mdk_rdev_t *rdev) +int md_rdev_init(mdk_rdev_t *rdev) { rdev->desc_nr = -1; rdev->saved_raid_disk = -1; @@ -2794,6 +2797,19 @@ void md_rdev_init(mdk_rdev_t *rdev) INIT_LIST_HEAD(&rdev->same_set); init_waitqueue_head(&rdev->blocked_wait); + + /* Add space to store bad block list. + * This reserves the space even on arrays where it cannot + * be used - I wonder if that matters + */ + rdev->badblocks.count = 0; + rdev->badblocks.shift = 0; + rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL); + seqlock_init(&rdev->badblocks.lock); + if (rdev->badblocks.page == NULL) + return -ENOMEM; + + return 0; } EXPORT_SYMBOL_GPL(md_rdev_init); /* @@ -2819,8 +2835,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi return ERR_PTR(-ENOMEM); } - md_rdev_init(rdev); - if ((err = alloc_disk_sb(rdev))) + err = md_rdev_init(rdev); + if (err) + goto abort_free; + err = alloc_disk_sb(rdev); + if (err) goto abort_free; err = lock_rdev(rdev, newdev, super_format == -2); @@ -2865,6 +2884,7 @@ abort_free: unlock_rdev(rdev); free_disk_sb(rdev); } + kfree(rdev->badblocks.page); kfree(rdev); return ERR_PTR(err); } @@ -7327,6 +7347,395 @@ void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) } EXPORT_SYMBOL(md_wait_for_blocked_rdev); + +/* Bad block management. + * We can record which blocks on each device are 'bad' and so just + * fail those blocks, or that stripe, rather than the whole device. 
+ * Entries in the bad-block table are 64bits wide. This comprises: + * Length of bad-range, in sectors: 0-511 for lengths 1-512 + * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes) + * A 'shift' can be set so that larger blocks are tracked and + * consequently larger devices can be covered. + * 'Acknowledged' flag - 1 bit. - the most significant bit. + * + * Locking of the bad-block table uses a seqlock so md_is_badblock + * might need to retry if it is very unlucky. + * We will sometimes want to check for bad blocks in a bi_end_io function, + * so we use the write_seqlock_irq variant. + * + * When looking for a bad block we specify a range and want to + * know if any block in the range is bad. So we binary-search + * to the last range that starts at-or-before the given endpoint, + * (or "before the sector after the target range") + * then see if it ends after the given start. + * We return + * 0 if there are no known bad blocks in the range + * 1 if there are known bad block which are all acknowledged + * -1 if there are bad blocks which have not yet been acknowledged in metadata. + * plus the start/length of the first bad section we overlap. + */ +int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, + sector_t *first_bad, int *bad_sectors) +{ + int hi; + int lo = 0; + u64 *p = bb->page; + int rv = 0; + sector_t target = s + sectors; + unsigned seq; + + if (bb->shift > 0) { + /* round the start down, and the end up */ + s >>= bb->shift; + target += (1<shift) - 1; + target >>= bb->shift; + sectors = target - s; + } + /* 'target' is now the first block after the bad range */ + +retry: + seq = read_seqbegin(&bb->lock); + + hi = bb->count; + + /* Binary search between lo and hi for 'target' + * i.e. for the last range that starts before 'target' + */ + /* INVARIANT: ranges before 'lo' and at-or-after 'hi' + * are known not to be the last range before target. + * VARIANT: hi-lo is the number of possible + * ranges, and decreases until it reaches 1 + */ + while (hi - lo > 1) { + int mid = (lo + hi) / 2; + sector_t a = BB_OFFSET(p[mid]); + if (a < target) + /* This could still be the one, earlier ranges + * could not. */ + lo = mid; + else + /* This and later ranges are definitely out. */ + hi = mid; + } + /* 'lo' might be the last that started before target, but 'hi' isn't */ + if (hi > lo) { + /* need to check all range that end after 's' to see if + * any are unacknowledged. + */ + while (lo >= 0 && + BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) { + if (BB_OFFSET(p[lo]) < target) { + /* starts before the end, and finishes after + * the start, so they must overlap + */ + if (rv != -1 && BB_ACK(p[lo])) + rv = 1; + else + rv = -1; + *first_bad = BB_OFFSET(p[lo]); + *bad_sectors = BB_LEN(p[lo]); + } + lo--; + } + } + + if (read_seqretry(&bb->lock, seq)) + goto retry; + + return rv; +} +EXPORT_SYMBOL_GPL(md_is_badblock); + +/* + * Add a range of bad blocks to the table. + * This might extend the table, or might contract it + * if two adjacent ranges can be merged. + * We binary-search to find the 'insertion' point, then + * decide how best to handle it. 
+ */ +static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, + int acknowledged) +{ + u64 *p; + int lo, hi; + int rv = 1; + + if (bb->shift < 0) + /* badblocks are disabled */ + return 0; + + if (bb->shift) { + /* round the start down, and the end up */ + sector_t next = s + sectors; + s >>= bb->shift; + next += (1<shift) - 1; + next >>= bb->shift; + sectors = next - s; + } + + write_seqlock_irq(&bb->lock); + + p = bb->page; + lo = 0; + hi = bb->count; + /* Find the last range that starts at-or-before 's' */ + while (hi - lo > 1) { + int mid = (lo + hi) / 2; + sector_t a = BB_OFFSET(p[mid]); + if (a <= s) + lo = mid; + else + hi = mid; + } + if (hi > lo && BB_OFFSET(p[lo]) > s) + hi = lo; + + if (hi > lo) { + /* we found a range that might merge with the start + * of our new range + */ + sector_t a = BB_OFFSET(p[lo]); + sector_t e = a + BB_LEN(p[lo]); + int ack = BB_ACK(p[lo]); + if (e >= s) { + /* Yes, we can merge with a previous range */ + if (s == a && s + sectors >= e) + /* new range covers old */ + ack = acknowledged; + else + ack = ack && acknowledged; + + if (e < s + sectors) + e = s + sectors; + if (e - a <= BB_MAX_LEN) { + p[lo] = BB_MAKE(a, e-a, ack); + s = e; + } else { + /* does not all fit in one range, + * make p[lo] maximal + */ + if (BB_LEN(p[lo]) != BB_MAX_LEN) + p[lo] = BB_MAKE(a, BB_MAX_LEN, ack); + s = a + BB_MAX_LEN; + } + sectors = e - s; + } + } + if (sectors && hi < bb->count) { + /* 'hi' points to the first range that starts after 's'. + * Maybe we can merge with the start of that range */ + sector_t a = BB_OFFSET(p[hi]); + sector_t e = a + BB_LEN(p[hi]); + int ack = BB_ACK(p[hi]); + if (a <= s + sectors) { + /* merging is possible */ + if (e <= s + sectors) { + /* full overlap */ + e = s + sectors; + ack = acknowledged; + } else + ack = ack && acknowledged; + + a = s; + if (e - a <= BB_MAX_LEN) { + p[hi] = BB_MAKE(a, e-a, ack); + s = e; + } else { + p[hi] = BB_MAKE(a, BB_MAX_LEN, ack); + s = a + BB_MAX_LEN; + } + sectors = e - s; + lo = hi; + hi++; + } + } + if (sectors == 0 && hi < bb->count) { + /* we might be able to combine lo and hi */ + /* Note: 's' is at the end of 'lo' */ + sector_t a = BB_OFFSET(p[hi]); + int lolen = BB_LEN(p[lo]); + int hilen = BB_LEN(p[hi]); + int newlen = lolen + hilen - (s - a); + if (s >= a && newlen < BB_MAX_LEN) { + /* yes, we can combine them */ + int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]); + p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack); + memmove(p + hi, p + hi + 1, + (bb->count - hi - 1) * 8); + bb->count--; + } + } + while (sectors) { + /* didn't merge (it all). + * Need to add a range just before 'hi' */ + if (bb->count >= MD_MAX_BADBLOCKS) { + /* No room for more */ + rv = 0; + break; + } else { + int this_sectors = sectors; + memmove(p + hi + 1, p + hi, + (bb->count - hi) * 8); + bb->count++; + + if (this_sectors > BB_MAX_LEN) + this_sectors = BB_MAX_LEN; + p[hi] = BB_MAKE(s, this_sectors, acknowledged); + sectors -= this_sectors; + s += this_sectors; + } + } + + bb->changed = 1; + write_sequnlock_irq(&bb->lock); + + return rv; +} + +int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors, + int acknowledged) +{ + int rv = md_set_badblocks(&rdev->badblocks, + s + rdev->data_offset, sectors, acknowledged); + if (rv) { + /* Make sure they get written out promptly */ + set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); + md_wakeup_thread(rdev->mddev->thread); + } + return rv; +} +EXPORT_SYMBOL_GPL(rdev_set_badblocks); + +/* + * Remove a range of bad blocks from the table. 
+/*
+ * Remove a range of bad blocks from the table.
+ * This may involve extending the table if we split a region,
+ * but it must not fail.  So if the table becomes full, we just
+ * drop the remove request.
+ */
+static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
+{
+	u64 *p;
+	int lo, hi;
+	sector_t target = s + sectors;
+	int rv = 0;
+
+	if (bb->shift > 0) {
+		/* When clearing we round the start up and the end down.
+		 * This should not matter as the shift should align with
+		 * the block size and no rounding should ever be needed.
+		 * However it is better to think a block is bad when it
+		 * isn't than to think a block is not bad when it is.
+		 */
+		s += (1<<bb->shift) - 1;
+		s >>= bb->shift;
+		target >>= bb->shift;
+		sectors = target - s;
+	}
+
+	write_seqlock_irq(&bb->lock);
+
+	p = bb->page;
+	lo = 0;
+	hi = bb->count;
+	/* Find the last range that starts before 'target' */
+	while (hi - lo > 1) {
+		int mid = (lo + hi) / 2;
+		sector_t a = BB_OFFSET(p[mid]);
+		if (a < target)
+			lo = mid;
+		else
+			hi = mid;
+	}
+	if (hi > lo) {
+		/* p[lo] is the last range that could overlap the
+		 * current range.  Earlier ranges could also overlap,
+		 * but only this one can overlap the end of the range.
+		 */
+		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+			/* Partial overlap, leave the tail of this range */
+			int ack = BB_ACK(p[lo]);
+			sector_t a = BB_OFFSET(p[lo]);
+			sector_t end = a + BB_LEN(p[lo]);
+
+			if (a < s) {
+				/* we need to split this range */
+				if (bb->count >= MD_MAX_BADBLOCKS) {
+					rv = 0;
+					goto out;
+				}
+				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
+				bb->count++;
+				p[lo] = BB_MAKE(a, s-a, ack);
+				lo++;
+			}
+			p[lo] = BB_MAKE(target, end - target, ack);
+			/* there is no longer an overlap */
+			hi = lo;
+			lo--;
+		}
+		while (lo >= 0 &&
+		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+			/* This range does overlap */
+			if (BB_OFFSET(p[lo]) < s) {
+				/* Keep the early parts of this range. */
+				int ack = BB_ACK(p[lo]);
+				sector_t start = BB_OFFSET(p[lo]);
+				p[lo] = BB_MAKE(start, s - start, ack);
+				/* now low doesn't overlap, so.. */
+				break;
+			}
+			lo--;
+		}
+		/* 'lo' is strictly before, 'hi' is strictly after,
+		 * anything between needs to be discarded
+		 */
+		if (hi - lo > 1) {
+			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
+			bb->count -= (hi - lo - 1);
+		}
+	}
+
+	bb->changed = 1;
+out:
+	write_sequnlock_irq(&bb->lock);
+	return rv;
+}
+
+int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors)
+{
+	return md_clear_badblocks(&rdev->badblocks,
+				  s + rdev->data_offset,
+				  sectors);
+}
+EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
+
+/*
+ * Acknowledge all bad blocks in a list.
+ * This only succeeds if ->changed is clear.  It is used by
+ * in-kernel metadata updates
+ */
+void md_ack_all_badblocks(struct badblocks *bb)
+{
+	if (bb->page == NULL || bb->changed)
+		/* no point even trying */
+		return;
+	write_seqlock_irq(&bb->lock);
+
+	if (bb->changed == 0) {
+		u64 *p = bb->page;
+		int i;
+		for (i = 0; i < bb->count ; i++) {
+			if (!BB_ACK(p[i])) {
+				sector_t start = BB_OFFSET(p[i]);
+				int len = BB_LEN(p[i]);
+				p[i] = BB_MAKE(start, len, 1);
+			}
+		}
+	}
+	write_sequnlock_irq(&bb->lock);
+}
+EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
+
 static int md_notify_reboot(struct notifier_block *this,
 			    unsigned long code, void *x)
 {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7d906a96477a..85af8433f8b8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,6 +29,13 @@
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
+/* Bad block numbers are stored sorted in a single page.
+ * 64bits is used for each block or extent. + * 54 bits are sector number, 9 bits are extent size, + * 1 bit is an 'acknowledged' flag. + */ +#define MD_MAX_BADBLOCKS (PAGE_SIZE/8) + /* * MD's 'extended' device */ @@ -111,8 +118,47 @@ struct mdk_rdev_s struct sysfs_dirent *sysfs_state; /* handle for 'state' * sysfs entry */ + + struct badblocks { + int count; /* count of bad blocks */ + int shift; /* shift from sectors to block size + * a -ve shift means badblocks are + * disabled.*/ + u64 *page; /* badblock list */ + int changed; + seqlock_t lock; + } badblocks; }; +#define BB_LEN_MASK (0x00000000000001FFULL) +#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL) +#define BB_ACK_MASK (0x8000000000000000ULL) +#define BB_MAX_LEN 512 +#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9) +#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1) +#define BB_ACK(x) (!!((x) & BB_ACK_MASK)) +#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63)) + +extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, + sector_t *first_bad, int *bad_sectors); +static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors, + sector_t *first_bad, int *bad_sectors) +{ + if (unlikely(rdev->badblocks.count)) { + int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s, + sectors, + first_bad, bad_sectors); + if (rv) + *first_bad -= rdev->data_offset; + return rv; + } + return 0; +} +extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors, + int acknowledged); +extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors); +extern void md_ack_all_badblocks(struct badblocks *bb); + struct mddev_s { void *private; @@ -517,7 +563,7 @@ extern void mddev_init(mddev_t *mddev); extern int md_run(mddev_t *mddev); extern void md_stop(mddev_t *mddev); extern void md_stop_writes(mddev_t *mddev); -extern void md_rdev_init(mdk_rdev_t *rdev); +extern int md_rdev_init(mdk_rdev_t *rdev); extern void mddev_suspend(mddev_t *mddev); extern void mddev_resume(mddev_t *mddev); -- cgit v1.2.3 From 16c791a5af3e50d0c11760485fd68e5829f3be4d Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:31:47 +1000 Subject: md/bad-block-log: add sysfs interface for accessing bad-block-log. This can show the log (providing it fits in one page) and allows bad blocks to be 'acknowledged' meaning that they have safely been recorded in metadata. Clearing bad blocks is not allowed via sysfs (except for code testing). A bad block can only be cleared when a write to the block succeeds. 
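As a hypothetical usage sketch for this interface (not part of the patch), one could add an acknowledged bad range from user space as below; the sysfs path assumes an array md0 with a component device sda, so adjust the rdev directory name for a real system.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	/* assumed path; each rdev gets a dev-<name> directory */
    	const char *path = "/sys/block/md0/md/dev-sda/bad_blocks";
    	const char *entry = "123456 8\n";	/* "sector length" */
    	int fd = open(path, O_WRONLY);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	if (write(fd, entry, strlen(entry)) < 0)
    		perror("write");	/* fails with ENOSPC if the table is full */
    	close(fd);
    	return 0;
    }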
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/md.c | 123 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 463a392c0705..7f5ffc51c28e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2711,6 +2711,35 @@ static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t le static struct rdev_sysfs_entry rdev_recovery_start = __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); + +static ssize_t +badblocks_show(struct badblocks *bb, char *page, int unack); +static ssize_t +badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); + +static ssize_t bb_show(mdk_rdev_t *rdev, char *page) +{ + return badblocks_show(&rdev->badblocks, page, 0); +} +static ssize_t bb_store(mdk_rdev_t *rdev, const char *page, size_t len) +{ + return badblocks_store(&rdev->badblocks, page, len, 0); +} +static struct rdev_sysfs_entry rdev_bad_blocks = +__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); + + +static ssize_t ubb_show(mdk_rdev_t *rdev, char *page) +{ + return badblocks_show(&rdev->badblocks, page, 1); +} +static ssize_t ubb_store(mdk_rdev_t *rdev, const char *page, size_t len) +{ + return badblocks_store(&rdev->badblocks, page, len, 1); +} +static struct rdev_sysfs_entry rdev_unack_bad_blocks = +__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); + static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_errors.attr, @@ -2718,6 +2747,8 @@ static struct attribute *rdev_default_attrs[] = { &rdev_offset.attr, &rdev_size.attr, &rdev_recovery_start.attr, + &rdev_bad_blocks.attr, + &rdev_unack_bad_blocks.attr, NULL, }; static ssize_t @@ -7736,6 +7767,98 @@ void md_ack_all_badblocks(struct badblocks *bb) } EXPORT_SYMBOL_GPL(md_ack_all_badblocks); +/* sysfs access to bad-blocks list. + * We present two files. + * 'bad-blocks' lists sector numbers and lengths of ranges that + * are recorded as bad. The list is truncated to fit within + * the one-page limit of sysfs. + * Writing "sector length" to this file adds an acknowledged + * bad block list. + * 'unacknowledged-bad-blocks' lists bad blocks that have not yet + * been acknowledged. Writing to this file adds bad blocks + * without acknowledging them. This is largely for testing. + */ + +static ssize_t +badblocks_show(struct badblocks *bb, char *page, int unack) +{ + size_t len; + int i; + u64 *p = bb->page; + unsigned seq; + + if (bb->shift < 0) + return 0; + +retry: + seq = read_seqbegin(&bb->lock); + + len = 0; + i = 0; + + while (len < PAGE_SIZE && i < bb->count) { + sector_t s = BB_OFFSET(p[i]); + unsigned int length = BB_LEN(p[i]); + int ack = BB_ACK(p[i]); + i++; + + if (unack && ack) + continue; + + len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n", + (unsigned long long)s << bb->shift, + length << bb->shift); + } + + if (read_seqretry(&bb->lock, seq)) + goto retry; + + return len; +} + +#define DO_DEBUG 1 + +static ssize_t +badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack) +{ + unsigned long long sector; + int length; + char newline; +#ifdef DO_DEBUG + /* Allow clearing via sysfs *only* for testing/debugging. 
+	 * Normally only a successful write may clear a badblock
+	 */
+	int clear = 0;
+	if (page[0] == '-') {
+		clear = 1;
+		page++;
+	}
+#endif /* DO_DEBUG */
+
+	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
+	case 3:
+		if (newline != '\n')
+			return -EINVAL;
+	case 2:
+		if (length <= 0)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+#ifdef DO_DEBUG
+	if (clear) {
+		md_clear_badblocks(bb, sector, length);
+		return len;
+	}
+#endif /* DO_DEBUG */
+	if (md_set_badblocks(bb, sector, length, !unack))
+		return len;
+	else
+		return -ENOSPC;
+}
+
 static int md_notify_reboot(struct notifier_block *this,
 			    unsigned long code, void *x)
 {
-- cgit v1.2.3


From 34b343cff4354ab9864be83be88405fd53d928a0 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Thu, 28 Jul 2011 11:31:47 +1000
Subject: md: don't allow arrays to contain devices with bad blocks.

As no personality understands bad block lists yet, we must
reject any device that is known to contain bad blocks.
As the personalities get taught, these tests can be removed.

This only applies to raid1/raid5/raid10.
For linear/raid0/multipath/faulty the whole concept of bad blocks
doesn't mean anything so there is no point adding the checks.

Signed-off-by: NeilBrown
Reviewed-by: Namhyung Kim
---
 drivers/md/raid1.c | 7 +++++++
 drivers/md/raid10.c | 8 ++++++++
 drivers/md/raid5.c | 7 +++++++
 3 files changed, 22 insertions(+)

(limited to 'drivers/md')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3cbf0ac2aaad..8db311d7cddc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1055,6 +1055,9 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	if (mddev->recovery_disabled == conf->recovery_disabled)
 		return -EBUSY;
 
+	if (rdev->badblocks.count)
+		return -EINVAL;
+
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
 
@@ -1994,6 +1997,10 @@ static int run(mddev_t *mddev)
 			blk_queue_segment_boundary(mddev->queue,
 						   PAGE_CACHE_SIZE - 1);
 		}
+		if (rdev->badblocks.count) {
+			printk(KERN_ERR "md/raid1: Cannot handle bad blocks yet\n");
+			return -EINVAL;
+		}
 	}
 
 	mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5def27c28be7..8aadd2f52dc8 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1101,6 +1101,9 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	int first = 0;
 	int last = conf->raid_disks - 1;
 
+	if (rdev->badblocks.count)
+		return -EINVAL;
+
 	if (mddev->recovery_cp < MaxSector)
 		/* only hot-add to in-sync arrays, as recovery is
 		 * very different from resync
@@ -2263,6 +2266,11 @@ static int run(mddev_t *mddev)
 					(conf->raid_disks / conf->near_copies));
 
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
+
+		if (rdev->badblocks.count) {
+			printk(KERN_ERR "md/raid10: cannot handle bad blocks yet\n");
+			goto out_free_conf;
+		}
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= conf->raid_disks
 		    || disk_idx < 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b874f42694e2..719445004dd9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4667,6 +4667,10 @@ static int run(mddev_t *mddev)
 	 * 0 for a fully functional array, 1 or 2 for a degraded array.
 	 */
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
+		if (rdev->badblocks.count) {
+			printk(KERN_ERR "md/raid5: cannot handle bad blocks yet\n");
+			goto abort;
+		}
 		if (rdev->raid_disk < 0)
 			continue;
 		if (test_bit(In_sync, &rdev->flags)) {
@@ -4975,6 +4979,9 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	int first = 0;
 	int last = conf->raid_disks - 1;
 
+	if (rdev->badblocks.count)
+		return -EINVAL;
+
 	if (has_failed(conf))
 		/* no point adding a device */
 		return -EINVAL;
-- cgit v1.2.3


From 2699b67223aca6b1450fc2f72e40fada952afc85 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Thu, 28 Jul 2011 11:31:47 +1000
Subject: md: load/store badblock list from v1.x metadata

Space must have been allocated when array was created.
A feature flag is set when the badblock list is non-empty, to
ensure old kernels don't load and trust the whole device.

We only update the on-disk badblocklist when it has changed.
If the badblocklist (or other metadata) is stored on a bad block, we
don't cope very well.

If metadata has no room for bad blocks, flag bad-blocks as disabled,
and do the same for 0.90 metadata.

Signed-off-by: NeilBrown
---
 drivers/md/md.c | 108 +++++++++++++++++++++++++++++++++++++++++++---
 drivers/md/md.h | 5 ++-
 include/linux/raid/md_p.h | 14 ++++--
 3 files changed, 116 insertions(+), 11 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7f5ffc51c28e..e2ec8bd0fb94 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -757,6 +757,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
 		rdev->sb_start = 0;
 		rdev->sectors = 0;
 	}
+	if (rdev->bb_page) {
+		put_page(rdev->bb_page);
+		rdev->bb_page = NULL;
+	}
 }
 
 
@@ -1395,6 +1399,8 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
 	return cpu_to_le32(csum);
 }
 
+static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
+			    int acknowledged);
 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 {
 	struct mdp_superblock_1 *sb;
@@ -1473,6 +1479,47 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 	else
 		rdev->desc_nr = le32_to_cpu(sb->dev_number);
 
+	if (!rdev->bb_page) {
+		rdev->bb_page = alloc_page(GFP_KERNEL);
+		if (!rdev->bb_page)
+			return -ENOMEM;
+	}
+	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
+	    rdev->badblocks.count == 0) {
+		/* need to load the bad block list.
+		 * Currently we limit it to one page.
+ */ + s32 offset; + sector_t bb_sector; + u64 *bbp; + int i; + int sectors = le16_to_cpu(sb->bblog_size); + if (sectors > (PAGE_SIZE / 512)) + return -EINVAL; + offset = le32_to_cpu(sb->bblog_offset); + if (offset == 0) + return -EINVAL; + bb_sector = (long long)offset; + if (!sync_page_io(rdev, bb_sector, sectors << 9, + rdev->bb_page, READ, true)) + return -EIO; + bbp = (u64 *)page_address(rdev->bb_page); + rdev->badblocks.shift = sb->bblog_shift; + for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { + u64 bb = le64_to_cpu(*bbp); + int count = bb & (0x3ff); + u64 sector = bb >> 10; + sector <<= sb->bblog_shift; + count <<= sb->bblog_shift; + if (bb + 1 == 0) + break; + if (md_set_badblocks(&rdev->badblocks, + sector, count, 1) == 0) + return -EINVAL; + } + } else if (sb->bblog_offset == 0) + rdev->badblocks.shift = -1; + if (!refdev) { ret = 1; } else { @@ -1624,7 +1671,6 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->pad0 = 0; sb->recovery_offset = cpu_to_le64(0); memset(sb->pad1, 0, sizeof(sb->pad1)); - memset(sb->pad2, 0, sizeof(sb->pad2)); memset(sb->pad3, 0, sizeof(sb->pad3)); sb->utime = cpu_to_le64((__u64)mddev->utime); @@ -1664,6 +1710,40 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); } + if (rdev->badblocks.count == 0) + /* Nothing to do for bad blocks*/ ; + else if (sb->bblog_offset == 0) + /* Cannot record bad blocks on this device */ + md_error(mddev, rdev); + else { + struct badblocks *bb = &rdev->badblocks; + u64 *bbp = (u64 *)page_address(rdev->bb_page); + u64 *p = bb->page; + sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); + if (bb->changed) { + unsigned seq; + +retry: + seq = read_seqbegin(&bb->lock); + + memset(bbp, 0xff, PAGE_SIZE); + + for (i = 0 ; i < bb->count ; i++) { + u64 internal_bb = *p++; + u64 store_bb = ((BB_OFFSET(internal_bb) << 10) + | BB_LEN(internal_bb)); + *bbp++ = cpu_to_le64(store_bb); + } + if (read_seqretry(&bb->lock, seq)) + goto retry; + + bb->sector = (rdev->sb_start + + (int)le32_to_cpu(sb->bblog_offset)); + bb->size = le16_to_cpu(sb->bblog_size); + bb->changed = 0; + } + } + max_dev = 0; list_for_each_entry(rdev2, &mddev->disks, same_set) if (rdev2->desc_nr+1 > max_dev) @@ -2196,6 +2276,7 @@ static void md_update_sb(mddev_t * mddev, int force_change) mdk_rdev_t *rdev; int sync_req; int nospares = 0; + int any_badblocks_changed = 0; repeat: /* First make sure individual recovery_offsets are correct */ @@ -2267,6 +2348,11 @@ repeat: MD_BUG(); mddev->events --; } + + list_for_each_entry(rdev, &mddev->disks, same_set) + if (rdev->badblocks.changed) + any_badblocks_changed++; + sync_sbs(mddev, nospares); spin_unlock_irq(&mddev->write_lock); @@ -2292,6 +2378,13 @@ repeat: bdevname(rdev->bdev,b), (unsigned long long)rdev->sb_start); rdev->sb_events = mddev->events; + if (rdev->badblocks.size) { + md_super_write(mddev, rdev, + rdev->badblocks.sector, + rdev->badblocks.size << 9, + rdev->bb_page); + rdev->badblocks.size = 0; + } } else dprintk(")\n"); @@ -2315,6 +2408,9 @@ repeat: if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + if (any_badblocks_changed) + list_for_each_entry(rdev, &mddev->disks, same_set) + md_ack_all_badblocks(&rdev->badblocks); } /* words written to sysfs files may, or may not, be \n terminated. 
@@ -2822,6 +2918,8 @@ int md_rdev_init(mdk_rdev_t *rdev) rdev->sb_events = 0; rdev->last_read_error.tv_sec = 0; rdev->last_read_error.tv_nsec = 0; + rdev->sb_loaded = 0; + rdev->bb_page = NULL; atomic_set(&rdev->nr_pending, 0); atomic_set(&rdev->read_errors, 0); atomic_set(&rdev->corrected_errors, 0); @@ -2910,11 +3008,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi return rdev; abort_free: - if (rdev->sb_page) { - if (rdev->bdev) - unlock_rdev(rdev); - free_disk_sb(rdev); - } + if (rdev->bdev) + unlock_rdev(rdev); + free_disk_sb(rdev); kfree(rdev->badblocks.page); kfree(rdev); return ERR_PTR(err); diff --git a/drivers/md/md.h b/drivers/md/md.h index 85af8433f8b8..aea9e9ff8a33 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -55,7 +55,7 @@ struct mdk_rdev_s struct block_device *meta_bdev; struct block_device *bdev; /* block device handle */ - struct page *sb_page; + struct page *sb_page, *bb_page; int sb_loaded; __u64 sb_events; sector_t data_offset; /* start of data in array */ @@ -127,6 +127,9 @@ struct mdk_rdev_s u64 *page; /* badblock list */ int changed; seqlock_t lock; + + sector_t sector; + sector_t size; /* in sectors */ } badblocks; }; diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 75cbf4f62fe8..9e65d9e20662 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -245,10 +245,16 @@ struct mdp_superblock_1 { __u8 device_uuid[16]; /* user-space setable, ignored by kernel */ __u8 devflags; /* per-device flags. Only one defined...*/ #define WriteMostly1 1 /* mask for writemostly flag in above */ - __u8 pad2[64-57]; /* set to 0 when writing */ + /* Bad block log. If there are any bad blocks the feature flag is set. + * If offset and size are non-zero, that space is reserved and available + */ + __u8 bblog_shift; /* shift from sectors to block size */ + __le16 bblog_size; /* number of sectors reserved for list */ + __le32 bblog_offset; /* sector offset from superblock to bblog, + * signed - not unsigned */ /* array state information - 64 bytes */ - __le64 utime; /* 40 bits second, 24 btes microseconds */ + __le64 utime; /* 40 bits second, 24 bits microseconds */ __le64 events; /* incremented when superblock updated */ __le64 resync_offset; /* data before this offset (from data_offset) known to be in sync */ __le32 sb_csum; /* checksum up to devs[max_dev] */ @@ -270,8 +276,8 @@ struct mdp_superblock_1 { * must be honoured */ #define MD_FEATURE_RESHAPE_ACTIVE 4 +#define MD_FEATURE_BAD_BLOCKS 8 /* badblock list is not empty */ -#define MD_FEATURE_ALL (1|2|4) +#define MD_FEATURE_ALL (1|2|4|8) #endif - -- cgit v1.2.3 From 9f2f3830789a4c9c1af2d1437d407c43e05136e6 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:31:47 +1000 Subject: md: Disable bad blocks and v0.90 metadata. v0.90 metadata cannot record bad blocks, so when loading metadata for such a device, set shift to -1. 
Signed-off-by: NeilBrown --- drivers/md/md.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index e2ec8bd0fb94..7ae3c5a18001 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1058,6 +1058,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version rdev->preferred_minor = sb->md_minor; rdev->data_offset = 0; rdev->sb_size = MD_SB_BYTES; + rdev->badblocks.shift = -1; if (sb->level == LEVEL_MULTIPATH) rdev->desc_nr = -1; @@ -3004,6 +3005,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi goto abort_free; } } + if (super_format == -1) + /* hot-add for 0.90, or non-persistent: so no badblocks */ + rdev->badblocks.shift = -1; return rdev; -- cgit v1.2.3 From d2eb35acfdccbe2a3622ed6cc441a5482148423b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:31:48 +1000 Subject: md/raid1: avoid reading from known bad blocks. Now that we have a bad block list, we should not read from those blocks. There are several main parts to this: 1/ read_balance needs to check for bad blocks, and return not only the chosen device, but also how many good blocks are available there. 2/ fix_read_error needs to avoid trying to read from bad blocks. 3/ read submission must be ready to issue multiple reads to different devices as different bad blocks on different devices could mean that a single large read cannot be served by any one device, but can still be served by the array. This requires keeping count of the number of outstanding requests per bio. This count is stored in 'bi_phys_segments' 4/ retrying a read needs to also be ready to submit a smaller read and queue another request for the rest. This does not yet handle bad blocks when reading to perform resync, recovery, or check. 'md_trim_bio' will also be used for RAID10, so put it in md.c and export it. Signed-off-by: NeilBrown --- drivers/md/md.c | 49 +++++++++++++ drivers/md/md.h | 1 + drivers/md/raid1.c | 208 +++++++++++++++++++++++++++++++++++++++++++++-------- drivers/md/raid1.h | 4 ++ 4 files changed, 233 insertions(+), 29 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 7ae3c5a18001..48217e8aa0eb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -215,6 +215,55 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, } EXPORT_SYMBOL_GPL(bio_clone_mddev); +void md_trim_bio(struct bio *bio, int offset, int size) +{ + /* 'bio' is a cloned bio which we need to trim to match + * the given offset and size. 
+ * This requires adjusting bi_sector, bi_size, and bi_io_vec + */ + int i; + struct bio_vec *bvec; + int sofar = 0; + + size <<= 9; + if (offset == 0 && size == bio->bi_size) + return; + + bio->bi_sector += offset; + bio->bi_size = size; + offset <<= 9; + clear_bit(BIO_SEG_VALID, &bio->bi_flags); + + while (bio->bi_idx < bio->bi_vcnt && + bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { + /* remove this whole bio_vec */ + offset -= bio->bi_io_vec[bio->bi_idx].bv_len; + bio->bi_idx++; + } + if (bio->bi_idx < bio->bi_vcnt) { + bio->bi_io_vec[bio->bi_idx].bv_offset += offset; + bio->bi_io_vec[bio->bi_idx].bv_len -= offset; + } + /* avoid any complications with bi_idx being non-zero*/ + if (bio->bi_idx) { + memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, + (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); + bio->bi_vcnt -= bio->bi_idx; + bio->bi_idx = 0; + } + /* Make sure vcnt and last bv are not too big */ + bio_for_each_segment(bvec, bio, i) { + if (sofar + bvec->bv_len > size) + bvec->bv_len = size - sofar; + if (bvec->bv_len == 0) { + bio->bi_vcnt = i; + break; + } + sofar += bvec->bv_len; + } +} +EXPORT_SYMBOL_GPL(md_trim_bio); + /* * We have a system wide 'event count' that is incremented * on any 'interesting' event, and readers of /proc/mdstat diff --git a/drivers/md/md.h b/drivers/md/md.h index aea9e9ff8a33..7c3192c0a29a 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -575,4 +575,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, mddev_t *mddev); extern int mddev_check_plugged(mddev_t *mddev); +extern void md_trim_bio(struct bio *bio, int offset, int size); #endif /* _MD_MD_H */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 8db311d7cddc..cc3939dc9e3d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -41,11 +41,7 @@ #include "bitmap.h" #define DEBUG 0 -#if DEBUG -#define PRINTK(x...) printk(x) -#else -#define PRINTK(x...) -#endif +#define PRINTK(x...) do { if (DEBUG) printk(x); } while (0) /* * Number of guaranteed r1bios in case of extreme VM load: @@ -177,12 +173,6 @@ static void free_r1bio(r1bio_t *r1_bio) { conf_t *conf = r1_bio->mddev->private; - /* - * Wake up any possible resync thread that waits for the device - * to go idle. - */ - allow_barrier(conf); - put_all_bios(conf, r1_bio); mempool_free(r1_bio, conf->r1bio_pool); } @@ -223,6 +213,33 @@ static void reschedule_retry(r1bio_t *r1_bio) * operation and are ready to return a success/failure code to the buffer * cache layer. */ +static void call_bio_endio(r1bio_t *r1_bio) +{ + struct bio *bio = r1_bio->master_bio; + int done; + conf_t *conf = r1_bio->mddev->private; + + if (bio->bi_phys_segments) { + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + bio->bi_phys_segments--; + done = (bio->bi_phys_segments == 0); + spin_unlock_irqrestore(&conf->device_lock, flags); + } else + done = 1; + + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) + clear_bit(BIO_UPTODATE, &bio->bi_flags); + if (done) { + bio_endio(bio, 0); + /* + * Wake up any possible resync thread that waits for the device + * to go idle. + */ + allow_barrier(conf); + } +} + static void raid_end_bio_io(r1bio_t *r1_bio) { struct bio *bio = r1_bio->master_bio; @@ -235,8 +252,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio) (unsigned long long) bio->bi_sector + (bio->bi_size >> 9) - 1); - bio_endio(bio, - test_bit(R1BIO_Uptodate, &r1_bio->state) ? 
0 : -EIO); + call_bio_endio(r1_bio); } free_r1bio(r1_bio); } @@ -295,6 +311,7 @@ static void raid1_end_read_request(struct bio *bio, int error) bdevname(conf->mirrors[mirror].rdev->bdev, b), (unsigned long long)r1_bio->sector); + set_bit(R1BIO_ReadError, &r1_bio->state); reschedule_retry(r1_bio); } @@ -381,7 +398,7 @@ static void raid1_end_write_request(struct bio *bio, int error) (unsigned long long) mbio->bi_sector, (unsigned long long) mbio->bi_sector + (mbio->bi_size >> 9) - 1); - bio_endio(mbio, 0); + call_bio_endio(r1_bio); } } } @@ -412,10 +429,11 @@ static void raid1_end_write_request(struct bio *bio, int error) * * The rdev for the device selected will have nr_pending incremented. */ -static int read_balance(conf_t *conf, r1bio_t *r1_bio) +static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors) { const sector_t this_sector = r1_bio->sector; - const int sectors = r1_bio->sectors; + int sectors; + int best_good_sectors; int start_disk; int best_disk; int i; @@ -430,8 +448,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) * We take the first readable disk when above the resync window. */ retry: + sectors = r1_bio->sectors; best_disk = -1; best_dist = MaxSector; + best_good_sectors = 0; + if (conf->mddev->recovery_cp < MaxSector && (this_sector + sectors >= conf->next_resync)) { choose_first = 1; @@ -443,6 +464,9 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) for (i = 0 ; i < conf->raid_disks ; i++) { sector_t dist; + sector_t first_bad; + int bad_sectors; + int disk = start_disk + i; if (disk >= conf->raid_disks) disk -= conf->raid_disks; @@ -465,6 +489,35 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* This is a reasonable device to use. It might * even be best. */ + if (is_badblock(rdev, this_sector, sectors, + &first_bad, &bad_sectors)) { + if (best_dist < MaxSector) + /* already have a better device */ + continue; + if (first_bad <= this_sector) { + /* cannot read here. If this is the 'primary' + * device, then we must not read beyond + * bad_sectors from another device.. + */ + bad_sectors -= (this_sector - first_bad); + if (choose_first && sectors > bad_sectors) + sectors = bad_sectors; + if (best_good_sectors > sectors) + best_good_sectors = sectors; + + } else { + sector_t good_sectors = first_bad - this_sector; + if (good_sectors > best_good_sectors) { + best_good_sectors = good_sectors; + best_disk = disk; + } + if (choose_first) + break; + } + continue; + } else + best_good_sectors = sectors; + dist = abs(this_sector - conf->mirrors[disk].head_position); if (choose_first /* Don't change to another disk for sequential reads */ @@ -493,10 +546,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) rdev_dec_pending(rdev, conf->mddev); goto retry; } + sectors = best_good_sectors; conf->next_seq_sect = this_sector + sectors; conf->last_used = best_disk; } rcu_read_unlock(); + *max_sectors = sectors; return best_disk; } @@ -763,11 +818,25 @@ static int make_request(mddev_t *mddev, struct bio * bio) r1_bio->mddev = mddev; r1_bio->sector = bio->bi_sector; + /* We might need to issue multiple reads to different + * devices if there are bad blocks around, so we keep + * track of the number of reads in bio->bi_phys_segments. + * If this is 0, there is only one r1_bio and no locking + * will be needed when requests complete. If it is + * non-zero, then it is the number of not-completed requests. 
+ */ + bio->bi_phys_segments = 0; + clear_bit(BIO_SEG_VALID, &bio->bi_flags); + if (rw == READ) { /* * read balancing logic: */ - int rdisk = read_balance(conf, r1_bio); + int max_sectors; + int rdisk; + +read_again: + rdisk = read_balance(conf, r1_bio, &max_sectors); if (rdisk < 0) { /* couldn't find anywhere to read from */ @@ -788,6 +857,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) r1_bio->read_disk = rdisk; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector, + max_sectors); r1_bio->bios[rdisk] = read_bio; @@ -797,7 +868,38 @@ static int make_request(mddev_t *mddev, struct bio * bio) read_bio->bi_rw = READ | do_sync; read_bio->bi_private = r1_bio; - generic_make_request(read_bio); + if (max_sectors < r1_bio->sectors) { + /* could not read all from this device, so we will + * need another r1_bio. + */ + int sectors_handled; + + sectors_handled = (r1_bio->sector + max_sectors + - bio->bi_sector); + r1_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (bio->bi_phys_segments == 0) + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + /* Cannot call generic_make_request directly + * as that will be queued in __make_request + * and subsequent mempool_alloc might block waiting + * for it. So hand bio over to raid1d. + */ + reschedule_retry(r1_bio); + + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + + r1_bio->master_bio = bio; + r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled; + r1_bio->state = 0; + r1_bio->mddev = mddev; + r1_bio->sector = bio->bi_sector + sectors_handled; + goto read_again; + } else + generic_make_request(read_bio); return 0; } @@ -849,8 +951,6 @@ static int make_request(mddev_t *mddev, struct bio * bio) goto retry_write; } - BUG_ON(targets == 0); /* we never fail the last device */ - if (targets < conf->raid_disks) { /* array is degraded, we will not clear the bitmap * on I/O completion (see raid1_end_write_request) */ @@ -1425,7 +1525,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) * * 1. Retries failed read operations on working mirrors. * 2. Updates the raid superblock when problems encounter. - * 3. Performs writes following reads for array syncronising. + * 3. Performs writes following reads for array synchronising. */ static void fix_read_error(conf_t *conf, int read_disk, @@ -1448,9 +1548,14 @@ static void fix_read_error(conf_t *conf, int read_disk, * which is the thread that might remove * a device. If raid1d ever becomes multi-threaded.... */ + sector_t first_bad; + int bad_sectors; + rdev = conf->mirrors[d].rdev; if (rdev && test_bit(In_sync, &rdev->flags) && + is_badblock(rdev, sect, s, + &first_bad, &bad_sectors) == 0 && sync_page_io(rdev, sect, s<<9, conf->tmppage, READ, false)) success = 1; @@ -1546,9 +1651,11 @@ static void raid1d(mddev_t *mddev) conf = mddev->private; if (test_bit(R1BIO_IsSync, &r1_bio->state)) sync_request_write(mddev, r1_bio); - else { + else if (test_bit(R1BIO_ReadError, &r1_bio->state)) { int disk; + int max_sectors; + clear_bit(R1BIO_ReadError, &r1_bio->state); /* we got a read error. Maybe the drive is bad. Maybe just * the block and we can fix it. 
* We freeze all other IO, and try reading the block from @@ -1568,21 +1675,28 @@ static void raid1d(mddev_t *mddev) conf->mirrors[r1_bio->read_disk].rdev); bio = r1_bio->bios[r1_bio->read_disk]; - if ((disk=read_balance(conf, r1_bio)) == -1) { + bdevname(bio->bi_bdev, b); +read_more: + disk = read_balance(conf, r1_bio, &max_sectors); + if (disk == -1) { printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O" " read error for block %llu\n", - mdname(mddev), - bdevname(bio->bi_bdev,b), + mdname(mddev), b, (unsigned long long)r1_bio->sector); raid_end_bio_io(r1_bio); } else { const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; - r1_bio->bios[r1_bio->read_disk] = - mddev->ro ? IO_BLOCKED : NULL; + if (bio) { + r1_bio->bios[r1_bio->read_disk] = + mddev->ro ? IO_BLOCKED : NULL; + bio_put(bio); + } r1_bio->read_disk = disk; - bio_put(bio); bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); + md_trim_bio(bio, + r1_bio->sector - bio->bi_sector, + max_sectors); r1_bio->bios[r1_bio->read_disk] = bio; rdev = conf->mirrors[disk].rdev; printk_ratelimited( @@ -1597,8 +1711,44 @@ static void raid1d(mddev_t *mddev) bio->bi_end_io = raid1_end_read_request; bio->bi_rw = READ | do_sync; bio->bi_private = r1_bio; - generic_make_request(bio); + if (max_sectors < r1_bio->sectors) { + /* Drat - have to split this up more */ + struct bio *mbio = r1_bio->master_bio; + int sectors_handled = + r1_bio->sector + max_sectors + - mbio->bi_sector; + r1_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (mbio->bi_phys_segments == 0) + mbio->bi_phys_segments = 2; + else + mbio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + generic_make_request(bio); + bio = NULL; + + r1_bio = mempool_alloc(conf->r1bio_pool, + GFP_NOIO); + + r1_bio->master_bio = mbio; + r1_bio->sectors = (mbio->bi_size >> 9) + - sectors_handled; + r1_bio->state = 0; + set_bit(R1BIO_ReadError, + &r1_bio->state); + r1_bio->mddev = mddev; + r1_bio->sector = mbio->bi_sector + + sectors_handled; + + goto read_more; + } else + generic_make_request(bio); } + } else { + /* just a partial read to be scheduled from separate + * context + */ + generic_make_request(r1_bio->bios[r1_bio->read_disk]); } cond_resched(); } diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 3cd18cfda2ad..aa6af37ca01b 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -123,6 +123,10 @@ struct r1bio_s { #define R1BIO_IsSync 1 #define R1BIO_Degraded 2 #define R1BIO_BehindIO 3 +/* Set ReadError on bios that experience a readerror so that + * raid1d knows what to do with them. + */ +#define R1BIO_ReadError 4 /* For write-behind requests, we call bi_end_io when * the last non-write-behind device completes, providing * any write was successful. Otherwise we call when -- cgit v1.2.3 From 06f603851fa90bcd236328438278d4dc8b655495 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:31:48 +1000 Subject: md/raid1: avoid reading known bad blocks during resync When performing resync/etc, keep the size of the request small enough that it doesn't overlap any known bad blocks. Devices with badblocks at the start of the request are completely excluded. If there is nowhere to read from due to bad blocks, record a bad block on each target device. Now that we never read from known-bad-blocks we can allow devices with known-bad-blocks into a RAID1. 
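The per-device window logic this message describes can be modelled in isolation. The sketch below (user space, illustrative only; the helper names are made up) mirrors how the resync code shrinks good_sectors when a bad range starts inside the window, and how min_bad tracks the shortest leading bad span when a device is bad at the very start of the request.

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* stand-in for an is_badblock() result */
    struct bb_hit { int is_bad; sector_t first_bad; int bad_sectors; };

    static void apply_device(struct bb_hit h, sector_t sector_nr,
    			 int *good_sectors, int *min_bad)
    {
    	if (!h.is_bad)
    		return;
    	if (h.first_bad > sector_nr) {
    		/* bad range starts later: stop the window just before it */
    		if (*good_sectors > (int)(h.first_bad - sector_nr))
    			*good_sectors = h.first_bad - sector_nr;
    	} else {
    		/* bad at the start: remember the shortest such span */
    		int bad = h.bad_sectors - (int)(sector_nr - h.first_bad);
    		if (*min_bad == 0 || *min_bad > bad)
    			*min_bad = bad;
    	}
    }

    int main(void)
    {
    	int good_sectors = 128, min_bad = 0;	/* window in sectors */
    	struct bb_hit a = { 1, 1040, 16 };	/* bad 1040..1055 */
    	struct bb_hit b = { 1, 1016, 24 };	/* bad 1016..1039 */

    	apply_device(a, 1024, &good_sectors, &min_bad);
    	apply_device(b, 1024, &good_sectors, &min_bad);
    	printf("good_sectors=%d min_bad=%d\n", good_sectors, min_bad);
    	return 0;
    }

If every in-sync device looks like device b, read_targets ends up zero and the sectors are marked bad on all write targets instead, as the code below arranges.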
Signed-off-by: NeilBrown --- drivers/md/raid1.c | 97 +++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 75 insertions(+), 22 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index cc3939dc9e3d..8c31c39b6f8c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1155,9 +1155,6 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) if (mddev->recovery_disabled == conf->recovery_disabled) return -EBUSY; - if (rdev->badblocks.count) - return -EINVAL; - if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; @@ -1303,6 +1300,9 @@ static int fix_sync_read_error(r1bio_t *r1_bio) * We don't need to freeze the array, because being in an * active sync request, there is no normal IO, and * no overlapping syncs. + * We don't need to check is_badblock() again as we + * made sure that anything with a bad block in range + * will have bi_end_io clear. */ mddev_t *mddev = r1_bio->mddev; conf_t *conf = mddev->private; @@ -1792,6 +1792,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i int write_targets = 0, read_targets = 0; sector_t sync_blocks; int still_degraded = 0; + int good_sectors = RESYNC_SECTORS; + int min_bad = 0; /* number of sectors that are bad in all devices */ if (!conf->r1buf_pool) if (init_resync(conf)) @@ -1879,36 +1881,89 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev == NULL || - test_bit(Faulty, &rdev->flags)) { + test_bit(Faulty, &rdev->flags)) { still_degraded = 1; - continue; } else if (!test_bit(In_sync, &rdev->flags)) { bio->bi_rw = WRITE; bio->bi_end_io = end_sync_write; write_targets ++; } else { /* may need to read from here */ - bio->bi_rw = READ; - bio->bi_end_io = end_sync_read; - if (test_bit(WriteMostly, &rdev->flags)) { - if (wonly < 0) - wonly = i; - } else { - if (disk < 0) - disk = i; + sector_t first_bad = MaxSector; + int bad_sectors; + + if (is_badblock(rdev, sector_nr, good_sectors, + &first_bad, &bad_sectors)) { + if (first_bad > sector_nr) + good_sectors = first_bad - sector_nr; + else { + bad_sectors -= (sector_nr - first_bad); + if (min_bad == 0 || + min_bad > bad_sectors) + min_bad = bad_sectors; + } + } + if (sector_nr < first_bad) { + if (test_bit(WriteMostly, &rdev->flags)) { + if (wonly < 0) + wonly = i; + } else { + if (disk < 0) + disk = i; + } + bio->bi_rw = READ; + bio->bi_end_io = end_sync_read; + read_targets++; } - read_targets++; } - atomic_inc(&rdev->nr_pending); - bio->bi_sector = sector_nr + rdev->data_offset; - bio->bi_bdev = rdev->bdev; - bio->bi_private = r1_bio; + if (bio->bi_end_io) { + atomic_inc(&rdev->nr_pending); + bio->bi_sector = sector_nr + rdev->data_offset; + bio->bi_bdev = rdev->bdev; + bio->bi_private = r1_bio; + } } rcu_read_unlock(); if (disk < 0) disk = wonly; r1_bio->read_disk = disk; + if (read_targets == 0 && min_bad > 0) { + /* These sectors are bad on all InSync devices, so we + * need to mark them bad on all write targets + */ + int ok = 1; + for (i = 0 ; i < conf->raid_disks ; i++) + if (r1_bio->bios[i]->bi_end_io == end_sync_write) { + mdk_rdev_t *rdev = + rcu_dereference(conf->mirrors[i].rdev); + ok = rdev_set_badblocks(rdev, sector_nr, + min_bad, 0 + ) && ok; + } + set_bit(MD_CHANGE_DEVS, &mddev->flags); + *skipped = 1; + put_buf(r1_bio); + + if (!ok) { + /* Cannot record the badblocks, so need to + * abort the resync. + * If there are multiple read targets, could just + * fail the really bad ones ??? 
+ */ + conf->recovery_disabled = mddev->recovery_disabled; + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + return 0; + } else + return min_bad; + + } + if (min_bad > 0 && min_bad < good_sectors) { + /* only resync enough to reach the next bad->good + * transition */ + good_sectors = min_bad; + } + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) /* extra read targets are also write targets */ write_targets += read_targets-1; @@ -1925,6 +1980,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i if (max_sector > mddev->resync_max) max_sector = mddev->resync_max; /* Don't do IO beyond here */ + if (max_sector > sector_nr + good_sectors) + max_sector = sector_nr + good_sectors; nr_sectors = 0; sync_blocks = 0; do { @@ -2147,10 +2204,6 @@ static int run(mddev_t *mddev) blk_queue_segment_boundary(mddev->queue, PAGE_CACHE_SIZE - 1); } - if (rdev->badblocks.count) { - printk(KERN_ERR "md/raid1: Cannot handle bad blocks yet\n"); - return -EINVAL; - } } mddev->degraded = 0; -- cgit v1.2.3 From d7a9d443bc8a75a24873c0506f50051edfedc714 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:31:48 +1000 Subject: md: add 'write_error' flag to component devices. If a device has ever seen a write error, we will want to handle known-bad-blocks differently. So create an appropriate state flag and export it via sysfs. Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/md.c | 12 ++++++++++++ drivers/md/md.h | 3 +++ 2 files changed, 15 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 48217e8aa0eb..1520d18c5af5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2516,6 +2516,10 @@ state_show(mdk_rdev_t *rdev, char *page) len += sprintf(page+len, "%sspare", sep); sep = ","; } + if (test_bit(WriteErrorSeen, &rdev->flags)) { + len += sprintf(page+len, "%swrite_error", sep); + sep = ","; + } return len+sprintf(page+len, "\n"); } @@ -2530,6 +2534,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) * blocked - sets the Blocked flag * -blocked - clears the Blocked flag * insync - sets Insync providing device isn't active + * write_error - sets WriteErrorSeen + * -write_error - clears WriteErrorSeen */ int err = -EINVAL; if (cmd_match(buf, "faulty") && rdev->mddev->pers) { @@ -2565,6 +2571,12 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { set_bit(In_sync, &rdev->flags); err = 0; + } else if (cmd_match(buf, "write_error")) { + set_bit(WriteErrorSeen, &rdev->flags); + err = 0; + } else if (cmd_match(buf, "-write_error")) { + clear_bit(WriteErrorSeen, &rdev->flags); + err = 0; } if (!err) sysfs_notify_dirent_safe(rdev->sysfs_state); diff --git a/drivers/md/md.h b/drivers/md/md.h index 7c3192c0a29a..fa4b607854ac 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -84,6 +84,9 @@ struct mdk_rdev_s #define Blocked 8 /* An error occurred on an externally * managed array, don't allow writes * until it is cleared */ +#define WriteErrorSeen 9 /* A write error has been seen on this + * device + */ wait_queue_head_t blocked_wait; int desc_nr; /* descriptor index in the superblock */ -- cgit v1.2.3 From de393cdea66cbd63c90725663f400c76faf1b255 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:31:48 +1000 Subject: md: make it easier to wait for bad blocks to be acknowledged. It is only safe to choose not to write to a bad block if that bad block is safely recorded in metadata - i.e. 
if it has been 'acknowledged'.  If it hasn't we need to wait for the
acknowledgement.

We support that using rdev->blocked wait and md_wait_for_blocked_rdev
by introducing a new device flag 'BlockedBadBlock'.

This flag is only advisory.  It is cleared whenever we acknowledge a
bad block, so that a waiter can re-check the particular bad blocks
that it is interested in.

It should be set by a caller when they find they need to wait.  This
(set after test) is inherently racy, but as md_wait_for_blocked_rdev
already has a timeout, losing the race will have minimal impact.

When we clear "Blocked" we also clear "BlockedBadBlocks" in case it
was set incorrectly (see above race).

We also modify the way we manage 'Blocked' to fit better with the new
handling of 'BlockedBadBlocks' and to make it consistent between
externally managed and internally managed metadata.  This requires
that each raidXd loop checks if the metadata needs to be written and
triggers a write (md_check_recovery) if needed.  Otherwise a queued
write request might cause raidXd to wait for the metadata to write,
and only that thread can write it.

Before writing metadata, we set FaultRecorded for all devices that
are Faulty, then after writing the metadata we clear Blocked for any
device for which the Fault was certainly Recorded.

The 'faulty' device flag now appears in sysfs if the device is faulty
*or* it has unacknowledged bad blocks.  So user-space which does not
understand bad blocks can continue to function correctly.
User space which does, should not assume a device is faulty until it
sees the 'faulty' flag, and then sees the list of unacknowledged bad
blocks is empty.

Signed-off-by: NeilBrown
---
 drivers/md/md.c | 77 ++++++++++++++++++++++++++++++-----------------
 drivers/md/md.h | 25 +++++++++++++++--
 drivers/md/raid1.c | 3 +++
 drivers/md/raid10.c | 3 +++
 drivers/md/raid5.c | 4 +++
 5 files changed, 85 insertions(+), 27 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1520d18c5af5..a6b6471da2bc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2341,8 +2341,18 @@ repeat:
 	if (!mddev->persistent) {
 		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
 		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
-		if (!mddev->external)
+		if (!mddev->external) {
 			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			list_for_each_entry(rdev, &mddev->disks, same_set) {
+				if (rdev->badblocks.changed) {
+					md_ack_all_badblocks(&rdev->badblocks);
+					md_error(mddev, rdev);
+				}
+				clear_bit(Blocked, &rdev->flags);
+				clear_bit(BlockedBadBlocks, &rdev->flags);
+				wake_up(&rdev->blocked_wait);
+			}
+		}
 		wake_up(&mddev->sb_wait);
 		return;
 	}
@@ -2399,9 +2409,12 @@ repeat:
 			mddev->events --;
 	}
 
-	list_for_each_entry(rdev, &mddev->disks, same_set)
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (rdev->badblocks.changed)
 			any_badblocks_changed++;
+		if (test_bit(Faulty, &rdev->flags))
+			set_bit(FaultRecorded, &rdev->flags);
+	}
 
 	sync_sbs(mddev, nospares);
 	spin_unlock_irq(&mddev->write_lock);
@@ -2458,9 +2471,15 @@ repeat:
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 
-	if (any_badblocks_changed)
-		list_for_each_entry(rdev, &mddev->disks, same_set)
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
+		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
+			clear_bit(Blocked, &rdev->flags);
+
+		if (any_badblocks_changed)
 			md_ack_all_badblocks(&rdev->badblocks);
+		clear_bit(BlockedBadBlocks, &rdev->flags);
+		wake_up(&rdev->blocked_wait);
+	}
 }
 
 /* words written to sysfs files may,
or may not, be \n terminated. @@ -2495,7 +2514,8 @@ state_show(mdk_rdev_t *rdev, char *page) char *sep = ""; size_t len = 0; - if (test_bit(Faulty, &rdev->flags)) { + if (test_bit(Faulty, &rdev->flags) || + rdev->badblocks.unacked_exist) { len+= sprintf(page+len, "%sfaulty",sep); sep = ","; } @@ -2507,7 +2527,8 @@ state_show(mdk_rdev_t *rdev, char *page) len += sprintf(page+len, "%swrite_mostly",sep); sep = ","; } - if (test_bit(Blocked, &rdev->flags)) { + if (test_bit(Blocked, &rdev->flags) || + rdev->badblocks.unacked_exist) { len += sprintf(page+len, "%sblocked", sep); sep = ","; } @@ -2527,12 +2548,12 @@ static ssize_t state_store(mdk_rdev_t *rdev, const char *buf, size_t len) { /* can write - * faulty - simulates and error + * faulty - simulates an error * remove - disconnects the device * writemostly - sets write_mostly * -writemostly - clears write_mostly - * blocked - sets the Blocked flag - * -blocked - clears the Blocked flag + * blocked - sets the Blocked flags + * -blocked - clears the Blocked and possibly simulates an error * insync - sets Insync providing device isn't active * write_error - sets WriteErrorSeen * -write_error - clears WriteErrorSeen @@ -2562,7 +2583,15 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) set_bit(Blocked, &rdev->flags); err = 0; } else if (cmd_match(buf, "-blocked")) { + if (!test_bit(Faulty, &rdev->flags) && + test_bit(BlockedBadBlocks, &rdev->flags)) { + /* metadata handler doesn't understand badblocks, + * so we need to fail the device + */ + md_error(rdev->mddev, rdev); + } clear_bit(Blocked, &rdev->flags); + clear_bit(BlockedBadBlocks, &rdev->flags); wake_up(&rdev->blocked_wait); set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); md_wakeup_thread(rdev->mddev->thread); @@ -2881,7 +2910,11 @@ static ssize_t bb_show(mdk_rdev_t *rdev, char *page) } static ssize_t bb_store(mdk_rdev_t *rdev, const char *page, size_t len) { - return badblocks_store(&rdev->badblocks, page, len, 0); + int rv = badblocks_store(&rdev->badblocks, page, len, 0); + /* Maybe that ack was all we needed */ + if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) + wake_up(&rdev->blocked_wait); + return rv; } static struct rdev_sysfs_entry rdev_bad_blocks = __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); @@ -6398,18 +6431,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev) if (!rdev || test_bit(Faulty, &rdev->flags)) return; - if (mddev->external) - set_bit(Blocked, &rdev->flags); -/* - dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", - mdname(mddev), - MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), - __builtin_return_address(0),__builtin_return_address(1), - __builtin_return_address(2),__builtin_return_address(3)); -*/ - if (!mddev->pers) - return; - if (!mddev->pers->error_handler) + if (!mddev->pers || !mddev->pers->error_handler) return; mddev->pers->error_handler(mddev,rdev); if (mddev->degraded) @@ -7286,8 +7308,7 @@ static int remove_and_add_spares(mddev_t *mddev) list_for_each_entry(rdev, &mddev->disks, same_set) { if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && - !test_bit(Faulty, &rdev->flags) && - !test_bit(Blocked, &rdev->flags)) + !test_bit(Faulty, &rdev->flags)) spares++; if (rdev->raid_disk < 0 && !test_bit(Faulty, &rdev->flags)) { @@ -7533,7 +7554,8 @@ void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) { sysfs_notify_dirent_safe(rdev->sysfs_state); wait_event_timeout(rdev->blocked_wait, - !test_bit(Blocked, &rdev->flags), + !test_bit(Blocked, &rdev->flags) && + 
!test_bit(BlockedBadBlocks, &rdev->flags), msecs_to_jiffies(5000)); rdev_dec_pending(rdev, mddev); } @@ -7779,6 +7801,8 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, } bb->changed = 1; + if (!acknowledged) + bb->unacked_exist = 1; write_sequnlock_irq(&bb->lock); return rv; @@ -7923,6 +7947,7 @@ void md_ack_all_badblocks(struct badblocks *bb) p[i] = BB_MAKE(start, len, 1); } } + bb->unacked_exist = 0; } write_sequnlock_irq(&bb->lock); } @@ -7970,6 +7995,8 @@ retry: (unsigned long long)s << bb->shift, length << bb->shift); } + if (unack && len == 0) + bb->unacked_exist = 0; if (read_seqretry(&bb->lock, seq)) goto retry; diff --git a/drivers/md/md.h b/drivers/md/md.h index fa4b607854ac..1e586bb4452e 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -81,12 +81,29 @@ struct mdk_rdev_s #define In_sync 2 /* device is in_sync with rest of array */ #define WriteMostly 4 /* Avoid reading if at all possible */ #define AutoDetected 7 /* added by auto-detect */ -#define Blocked 8 /* An error occurred on an externally - * managed array, don't allow writes +#define Blocked 8 /* An error occurred but has not yet + * been acknowledged by the metadata + * handler, so don't allow writes * until it is cleared */ #define WriteErrorSeen 9 /* A write error has been seen on this * device */ +#define FaultRecorded 10 /* Intermediate state for clearing + * Blocked. The Fault is/will-be + * recorded in the metadata, but that + * metadata hasn't been stored safely + * on disk yet. + */ +#define BlockedBadBlocks 11 /* A writer is blocked because they + * found an unacknowledged bad-block. + * This can safely be cleared at any + * time, and the writer will re-check. + * It may be set at any time, and at + * worst the writer will timeout and + * re-check. So setting it as + * accurately as possible is good, but + * not absolutely critical. + */ wait_queue_head_t blocked_wait; int desc_nr; /* descriptor index in the superblock */ @@ -124,6 +141,10 @@ struct mdk_rdev_s struct badblocks { int count; /* count of bad blocks */ + int unacked_exist; /* there probably are unacknowledged + * bad blocks. 
This is only cleared
+					 * when a read discovers none
+					 */
 	int shift;		/* shift from sectors to block size
 				 * a -ve shift means badblocks are
 				 * disabled.*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8c31c39b6f8c..4d40d9d54a20 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1059,6 +1059,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 		conf->recovery_disabled = mddev->recovery_disabled;
 		return;
 	}
+	set_bit(Blocked, &rdev->flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
 		unsigned long flags;
 		spin_lock_irqsave(&conf->device_lock, flags);
@@ -1751,6 +1752,8 @@ read_more:
 			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
 		}
 		cond_resched();
+		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+			md_check_recovery(mddev);
 	}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
@@ ... @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	}
+	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
 	printk(KERN_ALERT
@@ -1703,6 +1704,8 @@ static void raid10d(mddev_t *mddev)
 			}
 		}
 		cond_resched();
+		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+			md_check_recovery(mddev);
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
@@ ... @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	}
+	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
 	printk(KERN_ALERT
@@ -4143,6 +4144,9 @@ static void raid5d(mddev_t *mddev)
 		release_stripe(sh);
 
 		cond_resched();
+		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+			md_check_recovery(mddev);
+
 		spin_lock_irq(&conf->device_lock);
 	}
 	pr_debug("%d stripes handled\n", handled);
-- cgit v1.2.3


From 1f68f0c4b677ccd6935ff61e4e6888787505f8dc Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Thu, 28 Jul 2011 11:31:48 +1000
Subject: md/raid1: avoid writing to known-bad blocks on known-bad drives.

If we have seen any write error on a drive, then don't write to any
known-bad-blocks on that drive.
If necessary, we divide the write request up into pieces just like
we do for reads, so each piece is either all written or all not
written to any given drive.

Signed-off-by: NeilBrown
Reviewed-by: Namhyung Kim
---
 drivers/md/raid1.c | 153 ++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 115 insertions(+), 38 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4d40d9d54a20..3214606204d2 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -764,7 +764,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	mirror_info_t *mirror;
 	r1bio_t *r1_bio;
 	struct bio *read_bio;
-	int i, targets = 0, disks;
+	int i, disks;
 	struct bitmap *bitmap;
 	unsigned long flags;
 	const int rw = bio_data_dir(bio);
@@ -772,6 +772,9 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	mdk_rdev_t *blocked_rdev;
 	int plugged;
+	int first_clone;
+	int sectors_handled;
+	int max_sectors;
 
 	/*
 	 * Register the new request and wait if the reconstruction
@@ -832,7 +835,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		/*
 		 * read balancing logic:
 		 */
-		int max_sectors;
 		int rdisk;
 
 read_again:
@@ -872,7 +874,6 @@ read_again:
 			/* could not read all from this device, so we will
 			 * need another r1_bio.
 			 */
-			int sectors_handled;
 
 			sectors_handled = (r1_bio->sector + max_sectors
 					   - bio->bi_sector);
@@ -906,9 +907,15 @@ read_again:
 	/*
 	 * WRITE:
 	 */
-	/* first select target devices under spinlock and
+	/* first select target devices under rcu_lock and
 	 * inc refcount on their rdev.  Record them by setting
 	 * bios[x] to bio
+	 * If there are known/acknowledged bad blocks on any device on
+	 * which we have seen a write error, we want to avoid writing those
+	 * blocks.
+	 * This potentially requires several writes to write around
+	 * the bad blocks.  Each set of writes gets its own r1bio
+	 * with a set of bios attached.
*/ plugged = mddev_check_plugged(mddev); @@ -916,6 +923,7 @@ read_again: retry_write: blocked_rdev = NULL; rcu_read_lock(); + max_sectors = r1_bio->sectors; for (i = 0; i < disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { @@ -923,17 +931,56 @@ read_again: blocked_rdev = rdev; break; } - if (rdev && !test_bit(Faulty, &rdev->flags)) { - atomic_inc(&rdev->nr_pending); - if (test_bit(Faulty, &rdev->flags)) { + r1_bio->bios[i] = NULL; + if (!rdev || test_bit(Faulty, &rdev->flags)) { + set_bit(R1BIO_Degraded, &r1_bio->state); + continue; + } + + atomic_inc(&rdev->nr_pending); + if (test_bit(WriteErrorSeen, &rdev->flags)) { + sector_t first_bad; + int bad_sectors; + int is_bad; + + is_bad = is_badblock(rdev, r1_bio->sector, + max_sectors, + &first_bad, &bad_sectors); + if (is_bad < 0) { + /* mustn't write here until the bad block is + * acknowledged*/ + set_bit(BlockedBadBlocks, &rdev->flags); + blocked_rdev = rdev; + break; + } + if (is_bad && first_bad <= r1_bio->sector) { + /* Cannot write here at all */ + bad_sectors -= (r1_bio->sector - first_bad); + if (bad_sectors < max_sectors) + /* mustn't write more than bad_sectors + * to other devices yet + */ + max_sectors = bad_sectors; rdev_dec_pending(rdev, mddev); - r1_bio->bios[i] = NULL; - } else { - r1_bio->bios[i] = bio; - targets++; + /* We don't set R1BIO_Degraded as that + * only applies if the disk is + * missing, so it might be re-added, + * and we want to know to recover this + * chunk. + * In this case the device is here, + * and the fact that this chunk is not + * in-sync is recorded in the bad + * block log + */ + continue; } - } else - r1_bio->bios[i] = NULL; + if (is_bad) { + int good_sectors = first_bad - r1_bio->sector; + if (good_sectors < max_sectors) + max_sectors = good_sectors; + } + } + r1_bio->bios[i] = bio; } rcu_read_unlock(); @@ -944,48 +991,56 @@ read_again: for (j = 0; j < i; j++) if (r1_bio->bios[j]) rdev_dec_pending(conf->mirrors[j].rdev, mddev); - + r1_bio->state = 0; allow_barrier(conf); md_wait_for_blocked_rdev(blocked_rdev, mddev); wait_barrier(conf); goto retry_write; } - if (targets < conf->raid_disks) { - /* array is degraded, we will not clear the bitmap - * on I/O completion (see raid1_end_write_request) */ - set_bit(R1BIO_Degraded, &r1_bio->state); + if (max_sectors < r1_bio->sectors) { + /* We are splitting this write into multiple parts, so + * we need to prepare for allocating another r1_bio. + */ + r1_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (bio->bi_phys_segments == 0) + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); } - - /* do behind I/O ? 
- * Not if there are too many, or cannot allocate memory, - * or a reader on WriteMostly is waiting for behind writes - * to flush */ - if (bitmap && - (atomic_read(&bitmap->behind_writes) - < mddev->bitmap_info.max_write_behind) && - !waitqueue_active(&bitmap->behind_wait)) - alloc_behind_pages(bio, r1_bio); + sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->behind_remaining, 0); - bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors, - test_bit(R1BIO_BehindIO, &r1_bio->state)); + first_clone = 1; for (i = 0; i < disks; i++) { struct bio *mbio; if (!r1_bio->bios[i]) continue; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - r1_bio->bios[i] = mbio; - - mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; - mbio->bi_bdev = conf->mirrors[i].rdev->bdev; - mbio->bi_end_io = raid1_end_write_request; - mbio->bi_rw = WRITE | do_flush_fua | do_sync; - mbio->bi_private = r1_bio; - + md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors); + + if (first_clone) { + /* do behind I/O ? + * Not if there are too many, or cannot + * allocate memory, or a reader on WriteMostly + * is waiting for behind writes to flush */ + if (bitmap && + (atomic_read(&bitmap->behind_writes) + < mddev->bitmap_info.max_write_behind) && + !waitqueue_active(&bitmap->behind_wait)) + alloc_behind_pages(mbio, r1_bio); + + bitmap_startwrite(bitmap, r1_bio->sector, + r1_bio->sectors, + test_bit(R1BIO_BehindIO, + &r1_bio->state)); + first_clone = 0; + } if (r1_bio->behind_pages) { struct bio_vec *bvec; int j; @@ -1003,6 +1058,15 @@ read_again: atomic_inc(&r1_bio->behind_remaining); } + r1_bio->bios[i] = mbio; + + mbio->bi_sector = (r1_bio->sector + + conf->mirrors[i].rdev->data_offset); + mbio->bi_bdev = conf->mirrors[i].rdev->bdev; + mbio->bi_end_io = raid1_end_write_request; + mbio->bi_rw = WRITE | do_flush_fua | do_sync; + mbio->bi_private = r1_bio; + atomic_inc(&r1_bio->remaining); spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); @@ -1013,6 +1077,19 @@ read_again: /* In case raid1d snuck in to freeze_array */ wake_up(&conf->wait_barrier); + if (sectors_handled < (bio->bi_size >> 9)) { + /* We need another r1_bio. It has already been counted + * in bio->bi_phys_segments + */ + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + r1_bio->master_bio = bio; + r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled; + r1_bio->state = 0; + r1_bio->mddev = mddev; + r1_bio->sector = bio->bi_sector + sectors_handled; + goto retry_write; + } + if (do_sync || !bitmap || !plugged) md_wakeup_thread(mddev->thread); -- cgit v1.2.3 From 4367af556133723d0f443e14ca8170d9447317cb Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:31:49 +1000 Subject: md/raid1: clear bad-block record when write succeeds. If we succeed in writing to a block that was recorded as being bad, we clear the bad-block record. This requires some delayed handling as the bad-block-list update has to happen in process-context. 
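A minimal user-space sketch of that deferral, assuming simplified stand-ins (the r1bio struct, retry list and helpers below model the kernel objects; they are not the real interfaces): the completion path only flags the r1bio and requeues it, and the daemon does the record update later.

    #include <stdio.h>
    #include <stdlib.h>

    struct r1bio {                     /* stand-in for r1bio_t */
        unsigned long long sector;
        int sectors;
        int made_good;                 /* models the R1BIO_MadeGood bit */
        struct r1bio *next;
    };

    static struct r1bio *retry_list;   /* models conf->retry_list */

    /* completion path ("interrupt context"): must not update the
     * bad-block list here, so only flag the r1bio and requeue it */
    static void end_write(struct r1bio *r1)
    {
        r1->made_good = 1;
        r1->next = retry_list;         /* reschedule_retry() stand-in */
        retry_list = r1;
    }

    /* daemon path (process context): now safe to rewrite the record */
    static void raid1d_step(void)
    {
        while (retry_list) {
            struct r1bio *r1 = retry_list;

            retry_list = r1->next;
            if (r1->made_good)
                printf("clear bad blocks at %llu+%d\n",
                       r1->sector, r1->sectors);
            free(r1);
        }
    }

    int main(void)
    {
        struct r1bio *r1 = calloc(1, sizeof(*r1));

        if (!r1)
            return 1;
        r1->sector = 1024;
        r1->sectors = 8;
        end_write(r1);
        raid1d_step();
        return 0;
    }

In the kernel the list update is guarded by the badblocks seqlock (write_seqlock_irq in md_set_badblocks above), which is why the end_io path cannot perform it directly.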
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid1.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++-------- drivers/md/raid1.h | 13 ++++++++- 2 files changed, 80 insertions(+), 12 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 3214606204d2..141de75a6c7c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -163,7 +163,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) for (i = 0; i < conf->raid_disks; i++) { struct bio **bio = r1_bio->bios + i; - if (*bio && *bio != IO_BLOCKED) + if (!BIO_SPECIAL(*bio)) bio_put(*bio); *bio = NULL; } @@ -337,7 +337,10 @@ static void r1_bio_write_done(r1bio_t *r1_bio) !test_bit(R1BIO_Degraded, &r1_bio->state), test_bit(R1BIO_BehindIO, &r1_bio->state)); md_write_end(r1_bio->mddev); - raid_end_bio_io(r1_bio); + if (test_bit(R1BIO_MadeGood, &r1_bio->state)) + reschedule_retry(r1_bio); + else + raid_end_bio_io(r1_bio); } } @@ -363,7 +366,7 @@ static void raid1_end_write_request(struct bio *bio, int error) md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); /* an I/O failed, we can't clear the bitmap */ set_bit(R1BIO_Degraded, &r1_bio->state); - } else + } else { /* * Set R1BIO_Uptodate in our master bio, so that we * will return a good error code for to the higher @@ -374,8 +377,20 @@ static void raid1_end_write_request(struct bio *bio, int error) * to user-side. So if something waits for IO, then it * will wait for the 'master' bio. */ + sector_t first_bad; + int bad_sectors; + set_bit(R1BIO_Uptodate, &r1_bio->state); + /* Maybe we can clear some bad blocks. */ + if (is_badblock(conf->mirrors[mirror].rdev, + r1_bio->sector, r1_bio->sectors, + &first_bad, &bad_sectors)) { + r1_bio->bios[mirror] = IO_MADE_GOOD; + set_bit(R1BIO_MadeGood, &r1_bio->state); + } + } + update_head_pos(mirror, r1_bio); if (behind) { @@ -402,7 +417,9 @@ static void raid1_end_write_request(struct bio *bio, int error) } } } - rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); + if (r1_bio->bios[mirror] == NULL) + rdev_dec_pending(conf->mirrors[mirror].rdev, + conf->mddev); /* * Let's see if all mirrored write operations have finished @@ -1340,6 +1357,8 @@ static void end_sync_write(struct bio *bio, int error) conf_t *conf = mddev->private; int i; int mirror=0; + sector_t first_bad; + int bad_sectors; for (i = 0; i < conf->raid_disks; i++) if (r1_bio->bios[i] == bio) { @@ -1358,14 +1377,22 @@ static void end_sync_write(struct bio *bio, int error) sectors_to_go -= sync_blocks; } while (sectors_to_go > 0); md_error(mddev, conf->mirrors[mirror].rdev); - } + } else if (is_badblock(conf->mirrors[mirror].rdev, + r1_bio->sector, + r1_bio->sectors, + &first_bad, &bad_sectors)) + set_bit(R1BIO_MadeGood, &r1_bio->state); update_head_pos(mirror, r1_bio); if (atomic_dec_and_test(&r1_bio->remaining)) { - sector_t s = r1_bio->sectors; - put_buf(r1_bio); - md_done_sync(mddev, s, uptodate); + int s = r1_bio->sectors; + if (test_bit(R1BIO_MadeGood, &r1_bio->state)) + reschedule_retry(r1_bio); + else { + put_buf(r1_bio); + md_done_sync(mddev, s, uptodate); + } } } @@ -1727,9 +1754,39 @@ static void raid1d(mddev_t *mddev) mddev = r1_bio->mddev; conf = mddev->private; - if (test_bit(R1BIO_IsSync, &r1_bio->state)) - sync_request_write(mddev, r1_bio); - else if (test_bit(R1BIO_ReadError, &r1_bio->state)) { + if (test_bit(R1BIO_IsSync, &r1_bio->state)) { + if (test_bit(R1BIO_MadeGood, &r1_bio->state)) { + int m; + int s = r1_bio->sectors; + for (m = 0; m < conf->raid_disks ; m++) { + struct bio *bio = 
r1_bio->bios[m]; + if (bio->bi_end_io != NULL && + test_bit(BIO_UPTODATE, + &bio->bi_flags)) { + rdev = conf->mirrors[m].rdev; + rdev_clear_badblocks( + rdev, + r1_bio->sector, + r1_bio->sectors); + } + } + put_buf(r1_bio); + md_done_sync(mddev, s, 1); + } else + sync_request_write(mddev, r1_bio); + } else if (test_bit(R1BIO_MadeGood, &r1_bio->state)) { + int m; + for (m = 0; m < conf->raid_disks ; m++) + if (r1_bio->bios[m] == IO_MADE_GOOD) { + rdev = conf->mirrors[m].rdev; + rdev_clear_badblocks( + rdev, + r1_bio->sector, + r1_bio->sectors); + rdev_dec_pending(rdev, mddev); + } + raid_end_bio_io(r1_bio); + } else if (test_bit(R1BIO_ReadError, &r1_bio->state)) { int disk; int max_sectors; diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index aa6af37ca01b..f81360d49af4 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -116,7 +116,14 @@ struct r1bio_s { * correct the read error. To keep track of bad blocks on a per-bio * level, we store IO_BLOCKED in the appropriate 'bios' pointer */ -#define IO_BLOCKED ((struct bio*)1) +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. So we record + * the success by setting bios[n] to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) /* bits for r1bio.state */ #define R1BIO_Uptodate 0 @@ -135,6 +142,10 @@ struct r1bio_s { * Record that bi_end_io was called with this flag... */ #define R1BIO_Returned 6 +/* If a write for this request means we can clear some + * known-bad-block records, we set this flag + */ +#define R1BIO_MadeGood 7 extern int md_raid1_congested(mddev_t *mddev, int bits); -- cgit v1.2.3 From 2ca68f5ed7383733102ee53cd8fa4021ecc3b275 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:32:10 +1000 Subject: md/raid1: store behind-write pages in bi_vecs. When performing write-behind we allocate pages to store the data during write. Previously we just keep a list of pages. Now we keep a list of bi_vec which includes offset and size. This means that the r1bio has complete information to create a new bio which will be needed for retrying after write errors. 
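The gist of the change, as an illustrative sketch with stand-in types (PAGE_SIZE, struct bvec and alloc_behind_bvecs() are simplified models, not the kernel code):

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    struct bvec {                      /* stand-in for struct bio_vec */
        void *bv_page;
        unsigned int bv_len;
        unsigned int bv_offset;
    };

    /* copy each segment's geometry, but back it with a private page;
     * bv_len and bv_offset survive the copy */
    struct bvec *alloc_behind_bvecs(const struct bvec *src, int cnt)
    {
        struct bvec *bvecs = calloc(cnt, sizeof(*bvecs));
        int i;

        if (!bvecs)
            return NULL;
        for (i = 0; i < cnt; i++) {
            bvecs[i] = src[i];
            bvecs[i].bv_page = malloc(PAGE_SIZE);
            if (!bvecs[i].bv_page)
                goto fail;
            memcpy((char *)bvecs[i].bv_page + src[i].bv_offset,
                   (char *)src[i].bv_page + src[i].bv_offset,
                   src[i].bv_len);
        }
        return bvecs;
    fail:
        while (i--)
            free(bvecs[i].bv_page);
        free(bvecs);
        return NULL;
    }

Carrying bv_len and bv_offset along with each page is what lets a later retry path rebuild a write bio from the r1bio alone.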
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid1.c | 33 +++++++++++++++++---------------- drivers/md/raid1.h | 2 +- 2 files changed, 18 insertions(+), 17 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 141de75a6c7c..b16d2ee5e9dd 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -327,9 +327,9 @@ static void r1_bio_write_done(r1bio_t *r1_bio) /* free extra copy of the data pages */ int i = r1_bio->behind_page_count; while (i--) - safe_put_page(r1_bio->behind_pages[i]); - kfree(r1_bio->behind_pages); - r1_bio->behind_pages = NULL; + safe_put_page(r1_bio->behind_bvecs[i].bv_page); + kfree(r1_bio->behind_bvecs); + r1_bio->behind_bvecs = NULL; } /* clear the bitmap if all writes complete successfully */ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, @@ -748,30 +748,31 @@ static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio) { int i; struct bio_vec *bvec; - struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*), + struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec), GFP_NOIO); - if (unlikely(!pages)) + if (unlikely(!bvecs)) return; bio_for_each_segment(bvec, bio, i) { - pages[i] = alloc_page(GFP_NOIO); - if (unlikely(!pages[i])) + bvecs[i] = *bvec; + bvecs[i].bv_page = alloc_page(GFP_NOIO); + if (unlikely(!bvecs[i].bv_page)) goto do_sync_io; - memcpy(kmap(pages[i]) + bvec->bv_offset, - kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len); - kunmap(pages[i]); + memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset, + kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len); + kunmap(bvecs[i].bv_page); kunmap(bvec->bv_page); } - r1_bio->behind_pages = pages; + r1_bio->behind_bvecs = bvecs; r1_bio->behind_page_count = bio->bi_vcnt; set_bit(R1BIO_BehindIO, &r1_bio->state); return; do_sync_io: for (i = 0; i < bio->bi_vcnt; i++) - if (pages[i]) - put_page(pages[i]); - kfree(pages); + if (bvecs[i].bv_page) + put_page(bvecs[i].bv_page); + kfree(bvecs); PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); } @@ -1058,7 +1059,7 @@ read_again: &r1_bio->state)); first_clone = 0; } - if (r1_bio->behind_pages) { + if (r1_bio->behind_bvecs) { struct bio_vec *bvec; int j; @@ -1070,7 +1071,7 @@ read_again: * them all */ __bio_for_each_segment(bvec, mbio, j, 0) - bvec->bv_page = r1_bio->behind_pages[j]; + bvec->bv_page = r1_bio->behind_bvecs[j].bv_page; if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) atomic_inc(&r1_bio->behind_remaining); } diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index f81360d49af4..b7885474e96c 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -101,7 +101,7 @@ struct r1bio_s { struct list_head retry_list; /* Next two are only valid when R1BIO_BehindIO is set */ - struct page **behind_pages; + struct bio_vec *behind_bvecs; int behind_page_count; /* * if the IO is in WRITE direction, then multiple bios are used. -- cgit v1.2.3 From cd5ff9a16f0831f68c1024d5d776075b5123b034 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:32:41 +1000 Subject: md/raid1: Handle write errors by updating badblock log. When we get a write error (in the data area, not in metadata), update the badblock log rather than failing the whole device. As the write may well be many blocks, we try writing each block individually and only log the ones which fail.
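The alignment arithmetic of that per-block loop, as a stand-alone sketch (write_range() and record_badblock() are illustrative stubs for the real submit and log calls):

    #include <stdint.h>

    typedef uint64_t sector_t;

    static int write_range(sector_t sector, int sectors)     { return 1; }
    static int record_badblock(sector_t sector, int sectors) { return 1; }

    /* write 'total' sectors from 'start' one bad-block-sized chunk at
     * a time; only the chunks whose write fails go into the log */
    int narrow_write(sector_t start, int total, int bb_shift)
    {
        int block_sectors = 1 << bb_shift;    /* log granularity */
        sector_t sector = start;
        /* first chunk runs up to the next boundary, so every later
         * chunk is naturally aligned */
        int sectors = (int)(((start + block_sectors) &
                             ~(sector_t)(block_sectors - 1)) - start);
        int ok = 1;

        while (total > 0) {
            if (sectors > total)
                sectors = total;
            if (!write_range(sector, sectors))
                ok = record_badblock(sector, sectors) && ok;
            total -= sectors;
            sector += sectors;
            sectors = block_sectors;
        }
        return ok;   /* 0: a failed chunk could not even be logged */
    }

For example, with bb_shift = 3 (4KiB blocks of 8 sectors) a write starting at sector 1021 issues a 3-sector chunk first, then aligned 8-sector chunks.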
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid1.c | 168 +++++++++++++++++++++++++++++++++++++++++++++-------- drivers/md/raid1.h | 3 +- 2 files changed, 147 insertions(+), 24 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b16d2ee5e9dd..b45d641f4f33 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -318,25 +318,34 @@ static void raid1_end_read_request(struct bio *bio, int error) rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); } +static void close_write(r1bio_t *r1_bio) +{ + /* it really is the end of this request */ + if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { + /* free extra copy of the data pages */ + int i = r1_bio->behind_page_count; + while (i--) + safe_put_page(r1_bio->behind_bvecs[i].bv_page); + kfree(r1_bio->behind_bvecs); + r1_bio->behind_bvecs = NULL; + } + /* clear the bitmap if all writes complete successfully */ + bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, + r1_bio->sectors, + !test_bit(R1BIO_Degraded, &r1_bio->state), + test_bit(R1BIO_BehindIO, &r1_bio->state)); + md_write_end(r1_bio->mddev); +} + static void r1_bio_write_done(r1bio_t *r1_bio) { - if (atomic_dec_and_test(&r1_bio->remaining)) - { - /* it really is the end of this request */ - if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { - /* free extra copy of the data pages */ - int i = r1_bio->behind_page_count; - while (i--) - safe_put_page(r1_bio->behind_bvecs[i].bv_page); - kfree(r1_bio->behind_bvecs); - r1_bio->behind_bvecs = NULL; - } - /* clear the bitmap if all writes complete successfully */ - bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, - r1_bio->sectors, - !test_bit(R1BIO_Degraded, &r1_bio->state), - test_bit(R1BIO_BehindIO, &r1_bio->state)); - md_write_end(r1_bio->mddev); + if (!atomic_dec_and_test(&r1_bio->remaining)) + return; + + if (test_bit(R1BIO_WriteError, &r1_bio->state)) + reschedule_retry(r1_bio); + else { + close_write(r1_bio); if (test_bit(R1BIO_MadeGood, &r1_bio->state)) reschedule_retry(r1_bio); else @@ -360,12 +369,10 @@ static void raid1_end_write_request(struct bio *bio, int error) /* * 'one mirror IO has finished' event handler: */ - r1_bio->bios[mirror] = NULL; - to_put = bio; if (!uptodate) { - md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); - /* an I/O failed, we can't clear the bitmap */ - set_bit(R1BIO_Degraded, &r1_bio->state); + set_bit(WriteErrorSeen, + &conf->mirrors[mirror].rdev->flags); + set_bit(R1BIO_WriteError, &r1_bio->state); } else { /* * Set R1BIO_Uptodate in our master bio, so that we @@ -380,6 +387,8 @@ static void raid1_end_write_request(struct bio *bio, int error) sector_t first_bad; int bad_sectors; + r1_bio->bios[mirror] = NULL; + to_put = bio; set_bit(R1BIO_Uptodate, &r1_bio->state); /* Maybe we can clear some bad blocks. 
*/ @@ -1724,6 +1733,101 @@ static void fix_read_error(conf_t *conf, int read_disk, } } +static void bi_complete(struct bio *bio, int error) +{ + complete((struct completion *)bio->bi_private); +} + +static int submit_bio_wait(int rw, struct bio *bio) +{ + struct completion event; + rw |= REQ_SYNC; + + init_completion(&event); + bio->bi_private = &event; + bio->bi_end_io = bi_complete; + submit_bio(rw, bio); + wait_for_completion(&event); + + return test_bit(BIO_UPTODATE, &bio->bi_flags); +} + +static int narrow_write_error(r1bio_t *r1_bio, int i) +{ + mddev_t *mddev = r1_bio->mddev; + conf_t *conf = mddev->private; + mdk_rdev_t *rdev = conf->mirrors[i].rdev; + int vcnt, idx; + struct bio_vec *vec; + + /* bio has the data to be written to device 'i' where + * we just recently had a write error. + * We repeatedly clone the bio and trim down to one block, + * then try the write. Where the write fails we record + * a bad block. + * It is conceivable that the bio doesn't exactly align with + * blocks. We must handle this somehow. + * + * We currently own a reference on the rdev. + */ + + int block_sectors; + sector_t sector; + int sectors; + int sect_to_write = r1_bio->sectors; + int ok = 1; + + if (rdev->badblocks.shift < 0) + return 0; + + block_sectors = 1 << rdev->badblocks.shift; + sector = r1_bio->sector; + sectors = ((sector + block_sectors) + & ~(sector_t)(block_sectors - 1)) + - sector; + + if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { + vcnt = r1_bio->behind_page_count; + vec = r1_bio->behind_bvecs; + idx = 0; + while (vec[idx].bv_page == NULL) + idx++; + } else { + vcnt = r1_bio->master_bio->bi_vcnt; + vec = r1_bio->master_bio->bi_io_vec; + idx = r1_bio->master_bio->bi_idx; + } + while (sect_to_write) { + struct bio *wbio; + if (sectors > sect_to_write) + sectors = sect_to_write; + /* Write at 'sector' for 'sectors'*/ + + wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev); + memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec)); + wbio->bi_sector = r1_bio->sector; + wbio->bi_rw = WRITE; + wbio->bi_vcnt = vcnt; + wbio->bi_size = r1_bio->sectors << 9; + wbio->bi_idx = idx; + + md_trim_bio(wbio, sector - r1_bio->sector, sectors); + wbio->bi_sector += rdev->data_offset; + wbio->bi_bdev = rdev->bdev; + if (submit_bio_wait(WRITE, wbio) == 0) + /* failure! */ + ok = rdev_set_badblocks(rdev, sector, + sectors, 0) + && ok; + + bio_put(wbio); + sect_to_write -= sectors; + sector += sectors; + sectors = block_sectors; + } + return ok; +} + static void raid1d(mddev_t *mddev) { r1bio_t *r1_bio; @@ -1775,7 +1879,8 @@ static void raid1d(mddev_t *mddev) md_done_sync(mddev, s, 1); } else sync_request_write(mddev, r1_bio); - } else if (test_bit(R1BIO_MadeGood, &r1_bio->state)) { + } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || + test_bit(R1BIO_WriteError, &r1_bio->state)) { int m; for (m = 0; m < conf->raid_disks ; m++) if (r1_bio->bios[m] == IO_MADE_GOOD) { @@ -1785,7 +1890,24 @@ static void raid1d(mddev_t *mddev) r1_bio->sector, r1_bio->sectors); rdev_dec_pending(rdev, mddev); + } else if (r1_bio->bios[m] != NULL) { + /* This drive got a write error. We + * need to narrow down and record + * precise write errors. 
+ */ + if (!narrow_write_error(r1_bio, m)) { + md_error(mddev, + conf->mirrors[m].rdev); + /* an I/O failed, we can't clear + * the bitmap */ + set_bit(R1BIO_Degraded, + &r1_bio->state); + } + rdev_dec_pending(conf->mirrors[m].rdev, + mddev); } + if (test_bit(R1BIO_WriteError, &r1_bio->state)) + close_write(r1_bio); raid_end_bio_io(r1_bio); } else if (test_bit(R1BIO_ReadError, &r1_bio->state)) { int disk; diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index b7885474e96c..e0d676b48974 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -145,7 +145,8 @@ struct r1bio_s { /* If a write for this request means we can clear some * known-bad-block records, we set this flag */ -#define R1BIO_MadeGood 7 +#define R1BIO_MadeGood 7 +#define R1BIO_WriteError 8 extern int md_raid1_congested(mddev_t *mddev, int bits); -- cgit v1.2.3 From d8f05d2995d467a91db1af01637e6ffd94660ca8 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:33:00 +1000 Subject: md/raid1: record badblocks found during resync etc. If we find a bad block while writing as part of resync/recovery we need to report that back to raid1d which must record the bad block, or fail the device. Similarly when fixing a read error, a further error should just record a bad block if possible rather than failing the device. Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid1.c | 81 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 51 insertions(+), 30 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b45d641f4f33..e6957151233b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1386,7 +1386,9 @@ static void end_sync_write(struct bio *bio, int error) s += sync_blocks; sectors_to_go -= sync_blocks; } while (sectors_to_go > 0); - md_error(mddev, conf->mirrors[mirror].rdev); + set_bit(WriteErrorSeen, + &conf->mirrors[mirror].rdev->flags); + set_bit(R1BIO_WriteError, &r1_bio->state); } else if (is_badblock(conf->mirrors[mirror].rdev, r1_bio->sector, r1_bio->sectors, @@ -1397,7 +1399,8 @@ static void end_sync_write(struct bio *bio, int error) if (atomic_dec_and_test(&r1_bio->remaining)) { int s = r1_bio->sectors; - if (test_bit(R1BIO_MadeGood, &r1_bio->state)) + if (test_bit(R1BIO_MadeGood, &r1_bio->state) || + test_bit(R1BIO_WriteError, &r1_bio->state)) reschedule_retry(r1_bio); else { put_buf(r1_bio); @@ -1406,6 +1409,20 @@ static void end_sync_write(struct bio *bio, int error) } } +static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector, + int sectors, struct page *page, int rw) +{ + if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) + /* success */ + return 1; + if (rw == WRITE) + set_bit(WriteErrorSeen, &rdev->flags); + /* need to record an error - either for the block or the device */ + if (!rdev_set_badblocks(rdev, sector, sectors, 0)) + md_error(rdev->mddev, rdev); + return 0; +} + static int fix_sync_read_error(r1bio_t *r1_bio) { /* Try some synchronous reads of other devices to get @@ -1477,12 +1494,11 @@ static int fix_sync_read_error(r1bio_t *r1_bio) if (r1_bio->bios[d]->bi_end_io != end_sync_read) continue; rdev = conf->mirrors[d].rdev; - if (sync_page_io(rdev, sect, s<<9, - bio->bi_io_vec[idx].bv_page, - WRITE, false) == 0) { + if (r1_sync_page_io(rdev, sect, s, + bio->bi_io_vec[idx].bv_page, + WRITE) == 0) { r1_bio->bios[d]->bi_end_io = NULL; rdev_dec_pending(rdev, mddev); - md_error(mddev, rdev); } } d = start; @@ -1493,11 +1509,9 @@ static int fix_sync_read_error(r1bio_t *r1_bio) if 
(r1_bio->bios[d]->bi_end_io != end_sync_read) continue; rdev = conf->mirrors[d].rdev; - if (sync_page_io(rdev, sect, s<<9, - bio->bi_io_vec[idx].bv_page, - READ, false) == 0) - md_error(mddev, rdev); - else + if (r1_sync_page_io(rdev, sect, s, + bio->bi_io_vec[idx].bv_page, + READ) != 0) atomic_add(s, &rdev->corrected_errors); } sectors -= s; @@ -1682,8 +1696,10 @@ static void fix_read_error(conf_t *conf, int read_disk, } while (!success && d != read_disk); if (!success) { - /* Cannot read from anywhere -- bye bye array */ - md_error(mddev, conf->mirrors[read_disk].rdev); + /* Cannot read from anywhere - mark it bad */ + mdk_rdev_t *rdev = conf->mirrors[read_disk].rdev; + if (!rdev_set_badblocks(rdev, sect, s, 0)) + md_error(mddev, rdev); break; } /* write it back and re-read */ @@ -1694,13 +1710,9 @@ static void fix_read_error(conf_t *conf, int read_disk, d--; rdev = conf->mirrors[d].rdev; if (rdev && - test_bit(In_sync, &rdev->flags)) { - if (sync_page_io(rdev, sect, s<<9, - conf->tmppage, WRITE, false) - == 0) - /* Well, this device is dead */ - md_error(mddev, rdev); - } + test_bit(In_sync, &rdev->flags)) + r1_sync_page_io(rdev, sect, s, + conf->tmppage, WRITE); } d = start; while (d != read_disk) { @@ -1711,12 +1723,8 @@ static void fix_read_error(conf_t *conf, int read_disk, rdev = conf->mirrors[d].rdev; if (rdev && test_bit(In_sync, &rdev->flags)) { - if (sync_page_io(rdev, sect, s<<9, - conf->tmppage, READ, false) - == 0) - /* Well, this device is dead */ - md_error(mddev, rdev); - else { + if (r1_sync_page_io(rdev, sect, s, + conf->tmppage, READ)) { atomic_add(s, &rdev->corrected_errors); printk(KERN_INFO "md/raid1:%s: read error corrected " @@ -1860,20 +1868,33 @@ static void raid1d(mddev_t *mddev) mddev = r1_bio->mddev; conf = mddev->private; if (test_bit(R1BIO_IsSync, &r1_bio->state)) { - if (test_bit(R1BIO_MadeGood, &r1_bio->state)) { + if (test_bit(R1BIO_MadeGood, &r1_bio->state) || + test_bit(R1BIO_WriteError, &r1_bio->state)) { int m; int s = r1_bio->sectors; for (m = 0; m < conf->raid_disks ; m++) { + mdk_rdev_t *rdev + = conf->mirrors[m].rdev; struct bio *bio = r1_bio->bios[m]; - if (bio->bi_end_io != NULL && - test_bit(BIO_UPTODATE, + if (bio->bi_end_io == NULL) + continue; + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) { - rdev = conf->mirrors[m].rdev; rdev_clear_badblocks( rdev, r1_bio->sector, r1_bio->sectors); } + if (!test_bit(BIO_UPTODATE, + &bio->bi_flags) && + test_bit(R1BIO_WriteError, + &r1_bio->state)) { + if (!rdev_set_badblocks( + rdev, + r1_bio->sector, + r1_bio->sectors, 0)) + md_error(mddev, rdev); + } } put_buf(r1_bio); md_done_sync(mddev, s, 1); -- cgit v1.2.3 From 3a9f28a5117e00a868dd8b4395f9a707ae56764b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:33:42 +1000 Subject: md/raid1: improve handling of read failure during recovery. If we cannot read a block from anywhere during recovery, there is now a better approach than just giving up. We can record a bad block on each device and keep going - being careful not to clear the bad block when a write succeeds as it might - it will be a write of incorrect data. We have now reached the state where - for raid1 - we only call md_error if md_set_badblocks has failed. 
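The record-or-abort decision, sketched with stand-ins (set_badblocks() below models rdev_set_badblocks(), which fails when a device's bad-block log is full or disabled):

    /* stand-in: models rdev_set_badblocks() */
    static int set_badblocks(int dev, long long sect, int s) { return 1; }

    /* try to remember the lost block on every usable device; recovery
     * is only aborted if some device cannot record it */
    int handle_lost_block(int ndevs, const int *faulty, long long sect, int s)
    {
        int abort = 0;
        int d;

        for (d = 0; d < ndevs; d++) {
            if (faulty[d])
                continue;        /* nothing to record on a dead device */
            if (!set_badblocks(d, sect, s))
                abort = 1;
        }
        return abort;            /* caller sets MD_RECOVERY_INTR if set */
    }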
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid1.c | 41 ++++++++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e6957151233b..039e3af72929 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1392,7 +1392,12 @@ static void end_sync_write(struct bio *bio, int error) } else if (is_badblock(conf->mirrors[mirror].rdev, r1_bio->sector, r1_bio->sectors, - &first_bad, &bad_sectors) + &first_bad, &bad_sectors) && + !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, + r1_bio->sector, + r1_bio->sectors, + &first_bad, &bad_sectors) + ) set_bit(R1BIO_MadeGood, &r1_bio->state); update_head_pos(mirror, r1_bio); @@ -1473,16 +1478,36 @@ static int fix_sync_read_error(r1bio_t *r1_bio) if (!success) { char b[BDEVNAME_SIZE]; - /* Cannot read from anywhere, array is toast */ - md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); + int abort = 0; + /* Cannot read from anywhere, this block is lost. + * Record a bad block on each device. If that doesn't + * work just disable and interrupt the recovery. + * Don't fail devices as that won't really help. + */ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error" " for block %llu\n", mdname(mddev), bdevname(bio->bi_bdev, b), (unsigned long long)r1_bio->sector); - md_done_sync(mddev, r1_bio->sectors, 0); - put_buf(r1_bio); - return 0; + for (d = 0; d < conf->raid_disks; d++) { + rdev = conf->mirrors[d].rdev; + if (!rdev || test_bit(Faulty, &rdev->flags)) + continue; + if (!rdev_set_badblocks(rdev, sect, s, 0)) + abort = 1; + } + if (abort) { + mddev->recovery_disabled = 1; + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + md_done_sync(mddev, r1_bio->sectors, 0); + put_buf(r1_bio); + return 0; + } + /* Try next page */ + sectors -= s; + sect += s; + idx++; + continue; } start = d; @@ -1879,7 +1904,9 @@ static void raid1d(mddev_t *mddev) if (bio->bi_end_io == NULL) continue; if (test_bit(BIO_UPTODATE, - &bio->bi_flags)) { + &bio->bi_flags) && + test_bit(R1BIO_MadeGood, + &r1_bio->state)) { rdev_clear_badblocks( rdev, r1_bio->sector, -- cgit v1.2.3 From 62096bce231b3760882ed91205fc84682d6b0529 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:38:13 +1000 Subject: md/raid1: factor several functions out of raid1d() raid1d is too big with several deep branches. So separate them out into their own functions.
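The shape of the resulting daemon loop, condensed into a sketch (the handler names match the new functions; the state bits and stubs are simplified stand-ins):

    #include <stdio.h>

    enum {
        R1_IS_SYNC     = 1 << 0,
        R1_MADE_GOOD   = 1 << 1,
        R1_WRITE_ERROR = 1 << 2,
        R1_READ_ERROR  = 1 << 3,
    };

    static void handle_sync_write_finished(void) { puts("sync write done"); }
    static void sync_request_write(void)         { puts("sync write"); }
    static void handle_write_finished(void)      { puts("write done"); }
    static void handle_read_error(void)          { puts("read error"); }

    /* one flat dispatch replaces the deeply nested branches */
    void raid1d_dispatch(unsigned int state)
    {
        if (state & R1_IS_SYNC) {
            if (state & (R1_MADE_GOOD | R1_WRITE_ERROR))
                handle_sync_write_finished();
            else
                sync_request_write();
        } else if (state & (R1_MADE_GOOD | R1_WRITE_ERROR)) {
            handle_write_finished();
        } else if (state & R1_READ_ERROR) {
            handle_read_error();
        } else {
            puts("partial read rescheduled");
        }
    }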
Signed-off-by: NeilBrown Reviewed-by: Namhyung Kim --- drivers/md/raid1.c | 310 ++++++++++++++++++++++++++--------------------------- 1 file changed, 151 insertions(+), 159 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 039e3af72929..32323f0afd89 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1861,21 +1861,160 @@ static int narrow_write_error(r1bio_t *r1_bio, int i) return ok; } +static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio) +{ + int m; + int s = r1_bio->sectors; + for (m = 0; m < conf->raid_disks ; m++) { + mdk_rdev_t *rdev = conf->mirrors[m].rdev; + struct bio *bio = r1_bio->bios[m]; + if (bio->bi_end_io == NULL) + continue; + if (test_bit(BIO_UPTODATE, &bio->bi_flags) && + test_bit(R1BIO_MadeGood, &r1_bio->state)) { + rdev_clear_badblocks(rdev, r1_bio->sector, s); + } + if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && + test_bit(R1BIO_WriteError, &r1_bio->state)) { + if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) + md_error(conf->mddev, rdev); + } + } + put_buf(r1_bio); + md_done_sync(conf->mddev, s, 1); +} + +static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio) +{ + int m; + for (m = 0; m < conf->raid_disks ; m++) + if (r1_bio->bios[m] == IO_MADE_GOOD) { + mdk_rdev_t *rdev = conf->mirrors[m].rdev; + rdev_clear_badblocks(rdev, + r1_bio->sector, + r1_bio->sectors); + rdev_dec_pending(rdev, conf->mddev); + } else if (r1_bio->bios[m] != NULL) { + /* This drive got a write error. We need to + * narrow down and record precise write + * errors. + */ + if (!narrow_write_error(r1_bio, m)) { + md_error(conf->mddev, + conf->mirrors[m].rdev); + /* an I/O failed, we can't clear the bitmap */ + set_bit(R1BIO_Degraded, &r1_bio->state); + } + rdev_dec_pending(conf->mirrors[m].rdev, + conf->mddev); + } + if (test_bit(R1BIO_WriteError, &r1_bio->state)) + close_write(r1_bio); + raid_end_bio_io(r1_bio); +} + +static void handle_read_error(conf_t *conf, r1bio_t *r1_bio) +{ + int disk; + int max_sectors; + mddev_t *mddev = conf->mddev; + struct bio *bio; + char b[BDEVNAME_SIZE]; + mdk_rdev_t *rdev; + + clear_bit(R1BIO_ReadError, &r1_bio->state); + /* we got a read error. Maybe the drive is bad. Maybe just + * the block and we can fix it. + * We freeze all other IO, and try reading the block from + * other devices. When we find one, we re-write + * and check it that fixes the read error. + * This is all done synchronously while the array is + * frozen + */ + if (mddev->ro == 0) { + freeze_array(conf); + fix_read_error(conf, r1_bio->read_disk, + r1_bio->sector, r1_bio->sectors); + unfreeze_array(conf); + } else + md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); + + bio = r1_bio->bios[r1_bio->read_disk]; + bdevname(bio->bi_bdev, b); +read_more: + disk = read_balance(conf, r1_bio, &max_sectors); + if (disk == -1) { + printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O" + " read error for block %llu\n", + mdname(mddev), b, (unsigned long long)r1_bio->sector); + raid_end_bio_io(r1_bio); + } else { + const unsigned long do_sync + = r1_bio->master_bio->bi_rw & REQ_SYNC; + if (bio) { + r1_bio->bios[r1_bio->read_disk] = + mddev->ro ? 
IO_BLOCKED : NULL; + bio_put(bio); + } + r1_bio->read_disk = disk; + bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); + md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors); + r1_bio->bios[r1_bio->read_disk] = bio; + rdev = conf->mirrors[disk].rdev; + printk_ratelimited(KERN_ERR + "md/raid1:%s: redirecting sector %llu" + " to other mirror: %s\n", + mdname(mddev), + (unsigned long long)r1_bio->sector, + bdevname(rdev->bdev, b)); + bio->bi_sector = r1_bio->sector + rdev->data_offset; + bio->bi_bdev = rdev->bdev; + bio->bi_end_io = raid1_end_read_request; + bio->bi_rw = READ | do_sync; + bio->bi_private = r1_bio; + if (max_sectors < r1_bio->sectors) { + /* Drat - have to split this up more */ + struct bio *mbio = r1_bio->master_bio; + int sectors_handled = (r1_bio->sector + max_sectors + - mbio->bi_sector); + r1_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (mbio->bi_phys_segments == 0) + mbio->bi_phys_segments = 2; + else + mbio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + generic_make_request(bio); + bio = NULL; + + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + + r1_bio->master_bio = mbio; + r1_bio->sectors = (mbio->bi_size >> 9) + - sectors_handled; + r1_bio->state = 0; + set_bit(R1BIO_ReadError, &r1_bio->state); + r1_bio->mddev = mddev; + r1_bio->sector = mbio->bi_sector + sectors_handled; + + goto read_more; + } else + generic_make_request(bio); + } +} + static void raid1d(mddev_t *mddev) { r1bio_t *r1_bio; - struct bio *bio; unsigned long flags; conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; - mdk_rdev_t *rdev; struct blk_plug plug; md_check_recovery(mddev); blk_start_plug(&plug); for (;;) { - char b[BDEVNAME_SIZE]; if (atomic_read(&mddev->plug_cnt) == 0) flush_pending_writes(conf); @@ -1894,168 +2033,21 @@ static void raid1d(mddev_t *mddev) conf = mddev->private; if (test_bit(R1BIO_IsSync, &r1_bio->state)) { if (test_bit(R1BIO_MadeGood, &r1_bio->state) || - test_bit(R1BIO_WriteError, &r1_bio->state)) { - int m; - int s = r1_bio->sectors; - for (m = 0; m < conf->raid_disks ; m++) { - mdk_rdev_t *rdev - = conf->mirrors[m].rdev; - struct bio *bio = r1_bio->bios[m]; - if (bio->bi_end_io == NULL) - continue; - if (test_bit(BIO_UPTODATE, - &bio->bi_flags) && - test_bit(R1BIO_MadeGood, - &r1_bio->state)) { - rdev_clear_badblocks( - rdev, - r1_bio->sector, - r1_bio->sectors); - } - if (!test_bit(BIO_UPTODATE, - &bio->bi_flags) && - test_bit(R1BIO_WriteError, - &r1_bio->state)) { - if (!rdev_set_badblocks( - rdev, - r1_bio->sector, - r1_bio->sectors, 0)) - md_error(mddev, rdev); - } - } - put_buf(r1_bio); - md_done_sync(mddev, s, 1); - } else + test_bit(R1BIO_WriteError, &r1_bio->state)) + handle_sync_write_finished(conf, r1_bio); + else sync_request_write(mddev, r1_bio); } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || - test_bit(R1BIO_WriteError, &r1_bio->state)) { - int m; - for (m = 0; m < conf->raid_disks ; m++) - if (r1_bio->bios[m] == IO_MADE_GOOD) { - rdev = conf->mirrors[m].rdev; - rdev_clear_badblocks( - rdev, - r1_bio->sector, - r1_bio->sectors); - rdev_dec_pending(rdev, mddev); - } else if (r1_bio->bios[m] != NULL) { - /* This drive got a write error. We - * need to narrow down and record - * precise write errors. 
- */ - if (!narrow_write_error(r1_bio, m)) { - md_error(mddev, - conf->mirrors[m].rdev); - /* an I/O failed, we can't clear - * the bitmap */ - set_bit(R1BIO_Degraded, - &r1_bio->state); - } - rdev_dec_pending(conf->mirrors[m].rdev, - mddev); - } - if (test_bit(R1BIO_WriteError, &r1_bio->state)) - close_write(r1_bio); - raid_end_bio_io(r1_bio); - } else if (test_bit(R1BIO_ReadError, &r1_bio->state)) { - int disk; - int max_sectors; - - clear_bit(R1BIO_ReadError, &r1_bio->state); - /* we got a read error. Maybe the drive is bad. Maybe just - * the block and we can fix it. - * We freeze all other IO, and try reading the block from - * other devices. When we find one, we re-write - * and check it that fixes the read error. - * This is all done synchronously while the array is - * frozen - */ - if (mddev->ro == 0) { - freeze_array(conf); - fix_read_error(conf, r1_bio->read_disk, - r1_bio->sector, - r1_bio->sectors); - unfreeze_array(conf); - } else - md_error(mddev, - conf->mirrors[r1_bio->read_disk].rdev); - - bio = r1_bio->bios[r1_bio->read_disk]; - bdevname(bio->bi_bdev, b); -read_more: - disk = read_balance(conf, r1_bio, &max_sectors); - if (disk == -1) { - printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O" - " read error for block %llu\n", - mdname(mddev), b, - (unsigned long long)r1_bio->sector); - raid_end_bio_io(r1_bio); - } else { - const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; - if (bio) { - r1_bio->bios[r1_bio->read_disk] = - mddev->ro ? IO_BLOCKED : NULL; - bio_put(bio); - } - r1_bio->read_disk = disk; - bio = bio_clone_mddev(r1_bio->master_bio, - GFP_NOIO, mddev); - md_trim_bio(bio, - r1_bio->sector - bio->bi_sector, - max_sectors); - r1_bio->bios[r1_bio->read_disk] = bio; - rdev = conf->mirrors[disk].rdev; - printk_ratelimited( - KERN_ERR - "md/raid1:%s: redirecting sector %llu" - " to other mirror: %s\n", - mdname(mddev), - (unsigned long long)r1_bio->sector, - bdevname(rdev->bdev, b)); - bio->bi_sector = r1_bio->sector + rdev->data_offset; - bio->bi_bdev = rdev->bdev; - bio->bi_end_io = raid1_end_read_request; - bio->bi_rw = READ | do_sync; - bio->bi_private = r1_bio; - if (max_sectors < r1_bio->sectors) { - /* Drat - have to split this up more */ - struct bio *mbio = r1_bio->master_bio; - int sectors_handled = - r1_bio->sector + max_sectors - - mbio->bi_sector; - r1_bio->sectors = max_sectors; - spin_lock_irq(&conf->device_lock); - if (mbio->bi_phys_segments == 0) - mbio->bi_phys_segments = 2; - else - mbio->bi_phys_segments++; - spin_unlock_irq(&conf->device_lock); - generic_make_request(bio); - bio = NULL; - - r1_bio = mempool_alloc(conf->r1bio_pool, - GFP_NOIO); - - r1_bio->master_bio = mbio; - r1_bio->sectors = (mbio->bi_size >> 9) - - sectors_handled; - r1_bio->state = 0; - set_bit(R1BIO_ReadError, - &r1_bio->state); - r1_bio->mddev = mddev; - r1_bio->sector = mbio->bi_sector - + sectors_handled; - - goto read_more; - } else - generic_make_request(bio); - } - } else { + test_bit(R1BIO_WriteError, &r1_bio->state)) + handle_write_finished(conf, r1_bio); + else if (test_bit(R1BIO_ReadError, &r1_bio->state)) + handle_read_error(conf, r1_bio); + else /* just a partial read to be scheduled from separate * context */ generic_make_request(r1_bio->bios[r1_bio->read_disk]); - } + cond_resched(); if (mddev->flags & ~(1< Date: Thu, 28 Jul 2011 11:39:22 +1000 Subject: md/raid5: avoid reading from known bad blocks. There are two times that we might read in raid5: 1/ when a read request fits within a chunk on a single working device. 
In this case, if there is any bad block in the range of the read, we simply fail the cache-bypass read and perform the read though the stripe cache. 2/ when reading into the stripe cache. In this case we mark as failed any device which has a bad block in that strip (1 page wide). Note that we will both avoid reading and avoid writing. This is correct (as we will never read from the block, there is no point writing), but not optimal (as writing could 'fix' the error) - that will be addressed later. If we have not seen any write errors on the device yet, we treat a bad block like a recent read error. This will encourage an attempt to fix the read error which will either generate a write error, or will ensure good data is stored there. We don't yet forget the bad block in that case. That comes later. Now that we honour bad blocks when reading we can allow devices with bad blocks into the array. Signed-off-by: NeilBrown --- drivers/md/raid5.c | 46 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 14 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 304389ba5e27..a2d68389ee75 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2923,6 +2923,9 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) spin_lock_irq(&conf->device_lock); for (i=disks; i--; ) { mdk_rdev_t *rdev; + sector_t first_bad; + int bad_sectors; + int is_bad = 0; dev = &sh->dev[i]; @@ -2959,15 +2962,32 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) if (dev->written) s->written++; rdev = rcu_dereference(conf->disks[i].rdev); - if (s->blocked_rdev == NULL && - rdev && unlikely(test_bit(Blocked, &rdev->flags))) { - s->blocked_rdev = rdev; - atomic_inc(&rdev->nr_pending); + if (rdev) { + is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, + &first_bad, &bad_sectors); + if (s->blocked_rdev == NULL + && (test_bit(Blocked, &rdev->flags) + || is_bad < 0)) { + if (is_bad < 0) + set_bit(BlockedBadBlocks, + &rdev->flags); + s->blocked_rdev = rdev; + atomic_inc(&rdev->nr_pending); + } } clear_bit(R5_Insync, &dev->flags); if (!rdev) /* Not in-sync */; - else if (test_bit(In_sync, &rdev->flags)) + else if (is_bad) { + /* also not in-sync */ + if (!test_bit(WriteErrorSeen, &rdev->flags)) { + /* treat as in-sync, but with a read error + * which we can now try to correct + */ + set_bit(R5_Insync, &dev->flags); + set_bit(R5_ReadError, &dev->flags); + } + } else if (test_bit(In_sync, &rdev->flags)) set_bit(R5_Insync, &dev->flags); else { /* in sync if before recovery_offset */ @@ -3471,6 +3491,9 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) rcu_read_lock(); rdev = rcu_dereference(conf->disks[dd_idx].rdev); if (rdev && test_bit(In_sync, &rdev->flags)) { + sector_t first_bad; + int bad_sectors; + atomic_inc(&rdev->nr_pending); rcu_read_unlock(); raid_bio->bi_next = (void*)rdev; @@ -3478,8 +3501,10 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); align_bi->bi_sector += rdev->data_offset; - if (!bio_fits_rdev(align_bi)) { - /* too big in some way */ + if (!bio_fits_rdev(align_bi) || + is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9, + &first_bad, &bad_sectors)) { + /* too big in some way, or has a known bad block */ bio_put(align_bi); rdev_dec_pending(rdev, mddev); return 0; @@ -4671,10 +4696,6 @@ static int run(mddev_t *mddev) * 0 for a fully functional array, 1 or 2 for a degraded array. 
*/ list_for_each_entry(rdev, &mddev->disks, same_set) { - if (rdev->badblocks.count) { - printk(KERN_ERR "md/raid5: cannot handle bad blocks yet\n"); - goto abort; - } if (rdev->raid_disk < 0) continue; if (test_bit(In_sync, &rdev->flags)) { @@ -4983,9 +5004,6 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) int first = 0; int last = conf->raid_disks - 1; - if (rdev->badblocks.count) - return -EINVAL; - if (has_failed(conf)) /* no point adding a device */ return -EINVAL; -- cgit v1.2.3 From 7f0da59bdc2f65795a57009d78f7753d3aea1de3 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:22 +1000 Subject: md/raid5: use bad-block log to improve handling of uncorrectable read errors. If we get an uncorrectable read error - record a bad block rather than failing the device. And if these errors (which may be due to known bad blocks) cause recovery to be impossible, record a bad block on the recovering devices, or abort the recovery. As we might abort a recovery without failing a device we need to teach RAID5 about recovery_disabled handling. Signed-off-by: NeilBrown --- drivers/md/raid5.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++------- drivers/md/raid5.h | 2 +- 2 files changed, 53 insertions(+), 8 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a2d68389ee75..5fc621673e6c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2232,9 +2232,18 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, rcu_read_lock(); rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && test_bit(In_sync, &rdev->flags)) - /* multiple read failures in one stripe */ - md_error(conf->mddev, rdev); + atomic_inc(&rdev->nr_pending); + else + rdev = NULL; rcu_read_unlock(); + if (rdev) { + if (!rdev_set_badblocks( + rdev, + sh->sector, + STRIPE_SECTORS, 0)) + md_error(conf->mddev, rdev); + rdev_dec_pending(rdev, conf->mddev); + } } spin_lock_irq(&conf->device_lock); /* fail all writes first */ @@ -2313,6 +2322,41 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, md_wakeup_thread(conf->mddev->thread); } +static void +handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh, + struct stripe_head_state *s) +{ + int abort = 0; + int i; + + md_done_sync(conf->mddev, STRIPE_SECTORS, 0); + clear_bit(STRIPE_SYNCING, &sh->state); + s->syncing = 0; + /* There is nothing more to do for sync/check/repair. + * For recover we need to record a bad block on all + * non-sync devices, or abort the recovery + */ + if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) + return; + /* During recovery devices cannot be removed, so locking and + * refcounting of rdevs is not needed + */ + for (i = 0; i < conf->raid_disks; i++) { + mdk_rdev_t *rdev = conf->disks[i].rdev; + if (!rdev + || test_bit(Faulty, &rdev->flags) + || test_bit(In_sync, &rdev->flags)) + continue; + if (!rdev_set_badblocks(rdev, sh->sector, + STRIPE_SECTORS, 0)) + abort = 1; + } + if (abort) { + conf->recovery_disabled = conf->mddev->recovery_disabled; + set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery); + } +} + /* fetch_block - checks the given member device to see if its data needs * to be read or computed to satisfy a request. 
* @@ -3067,11 +3111,8 @@ static void handle_stripe(struct stripe_head *sh) */ if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); - if (s.failed > conf->max_degraded && s.syncing) { - md_done_sync(conf->mddev, STRIPE_SECTORS, 0); - clear_bit(STRIPE_SYNCING, &sh->state); - s.syncing = 0; - } + if (s.failed > conf->max_degraded && s.syncing) + handle_failed_sync(conf, sh, &s); /* * might be able to return some write requests if the parity blocks @@ -4976,6 +5017,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number) * isn't possible. */ if (!test_bit(Faulty, &rdev->flags) && + mddev->recovery_disabled != conf->recovery_disabled && !has_failed(conf) && number < conf->raid_disks) { err = -EBUSY; @@ -5004,6 +5046,9 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) int first = 0; int last = conf->raid_disks - 1; + if (mddev->recovery_disabled == conf->recovery_disabled) + return -EBUSY; + if (has_failed(conf)) /* no point adding a device */ return -EINVAL; diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 68c500af1108..c5429d123636 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -399,7 +399,7 @@ struct raid5_private_data { * (fresh device added). * Cleared when a sync completes. */ - + int recovery_disabled; /* per cpu variables */ struct raid5_percpu { struct page *spare_page; /* Used when checking P/Q in raid6 */ -- cgit v1.2.3 From bc2607f393bd4fb844c1886a02af929ca0372056 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:22 +1000 Subject: md/raid5: write errors should be recorded as bad blocks if possible. When a write error is detected, don't mark the device as failed immediately but rather record the fact for handle_stripe to deal with. Handle_stripe then attempts to record a bad block. Only if that fails does the device get marked as faulty. 
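The same two-phase pattern raid1 already uses, in miniature (only the R5_WriteError name is taken from the patch; the globals and stubs are illustrative):

    #include <stdio.h>

    #define R5_WRITE_ERROR (1u << 15)     /* mirrors the new dev flag */

    static unsigned int dev_flags;

    /* completion callback: may run in interrupt context, so it only
     * latches the flag instead of failing the device */
    void raid5_end_write(int uptodate)
    {
        if (!uptodate)
            dev_flags |= R5_WRITE_ERROR;
    }

    static int set_badblocks(long long sector, int sectors) { return 1; }

    /* handle_stripe() runs later, in process context, where the
     * bad-block log can be updated; md_error() is now the fallback */
    void handle_stripe_step(long long sector, int sectors)
    {
        if (dev_flags & R5_WRITE_ERROR) {
            dev_flags &= ~R5_WRITE_ERROR;
            if (!set_badblocks(sector, sectors))
                puts("md_error: log full, marking device Faulty");
        }
    }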
Signed-off-by: NeilBrown --- drivers/md/raid5.c | 33 +++++++++++++++++++++++++++++++-- drivers/md/raid5.h | 18 ++++++++++-------- 2 files changed, 41 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 5fc621673e6c..9768a7d67148 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1658,8 +1658,10 @@ static void raid5_end_write_request(struct bio *bi, int error) return; } - if (!uptodate) - md_error(conf->mddev, conf->disks[i].rdev); + if (!uptodate) { + set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags); + set_bit(R5_WriteError, &sh->dev[i].flags); + } rdev_dec_pending(conf->disks[i].rdev, conf->mddev); @@ -3038,6 +3040,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) set_bit(R5_Insync, &dev->flags); } + if (test_bit(R5_WriteError, &dev->flags)) { + clear_bit(R5_Insync, &dev->flags); + if (!test_bit(Faulty, &rdev->flags)) { + s->handle_bad_blocks = 1; + atomic_inc(&rdev->nr_pending); + } else + clear_bit(R5_WriteError, &dev->flags); + } if (!test_bit(R5_Insync, &dev->flags)) { /* The ReadError flag will just be confusing now */ clear_bit(R5_ReadError, &dev->flags); @@ -3086,6 +3096,11 @@ static void handle_stripe(struct stripe_head *sh) analyse_stripe(sh, &s); + if (s.handle_bad_blocks) { + set_bit(STRIPE_HANDLE, &sh->state); + goto finish; + } + if (unlikely(s.blocked_rdev)) { if (s.syncing || s.expanding || s.expanded || s.to_write || s.written) { @@ -3283,6 +3298,20 @@ finish: if (unlikely(s.blocked_rdev)) md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); + if (s.handle_bad_blocks) + for (i = disks; i--; ) { + mdk_rdev_t *rdev; + struct r5dev *dev = &sh->dev[i]; + if (test_and_clear_bit(R5_WriteError, &dev->flags)) { + /* We own a safe reference to the rdev */ + rdev = conf->disks[i].rdev; + if (!rdev_set_badblocks(rdev, sh->sector, + STRIPE_SECTORS, 0)) + md_error(conf->mddev, rdev); + rdev_dec_pending(rdev, conf->mddev); + } + } + if (s.ops_request) raid_run_ops(sh, s.ops_request); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index c5429d123636..8620cb67ae39 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -249,6 +249,7 @@ struct stripe_head_state { struct bio *return_bi; mdk_rdev_t *blocked_rdev; + int handle_bad_blocks; }; /* Flags */ @@ -264,14 +265,15 @@ struct stripe_head_state { #define R5_ReWrite 9 /* have tried to over-write the readerror */ #define R5_Expanded 10 /* This block now has post-expand data */ -#define R5_Wantcompute 11 /* compute_block in progress treat as - * uptodate - */ -#define R5_Wantfill 12 /* dev->toread contains a bio that needs - * filling - */ -#define R5_Wantdrain 13 /* dev->towrite needs to be drained */ -#define R5_WantFUA 14 /* Write should be FUA */ +#define R5_Wantcompute 11 /* compute_block in progress treat as + * uptodate + */ +#define R5_Wantfill 12 /* dev->toread contains a bio that needs + * filling + */ +#define R5_Wantdrain 13 /* dev->towrite needs to be drained */ +#define R5_WantFUA 14 /* Write should be FUA */ +#define R5_WriteError 15 /* got a write error - need to record it */ /* * Write method */ -- cgit v1.2.3 From 73e92e51b7969ef5477dd28fe2ae4d77675896f4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:22 +1000 Subject: md/raid5. Don't write to known bad block on doubtful devices. If a device has seen write errors, don't write to any known bad blocks on that device. 
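The three-way decision made before issuing each write, sketched with a stand-in is_badblock() that follows the md convention (0 = no overlap, 1 = overlap with acknowledged bad blocks, -1 = unacknowledged overlap):

    enum write_action { DO_WRITE, SKIP_WRITE, WAIT_FOR_ACK };

    /* stand-in for is_badblock(), convention as above */
    static int is_badblock(long long sector, int sectors) { return 0; }

    enum write_action check_before_write(long long sector, int sectors)
    {
        int bad = is_badblock(sector, sectors);

        if (bad == 0)
            return DO_WRITE;        /* range is clean */
        if (bad < 0)
            return WAIT_FOR_ACK;    /* block until the metadata handler
                                     * acknowledges the bad block */
        return SKIP_WRITE;          /* acked bad block: it will never be
                                     * read back, so writing is pointless */
    }

In the patch, WAIT_FOR_ACK corresponds to setting BlockedBadBlocks and calling md_wait_for_blocked_rdev().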
Signed-off-by: NeilBrown --- drivers/md/raid5.c | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9768a7d67148..3fa3f20dc5f5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -526,6 +526,36 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) atomic_inc(&rdev->nr_pending); rcu_read_unlock(); + /* We have already checked bad blocks for reads. Now + * need to check for writes. + */ + while ((rw & WRITE) && rdev && + test_bit(WriteErrorSeen, &rdev->flags)) { + sector_t first_bad; + int bad_sectors; + int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, + &first_bad, &bad_sectors); + if (!bad) + break; + + if (bad < 0) { + set_bit(BlockedBadBlocks, &rdev->flags); + if (!conf->mddev->external && + conf->mddev->flags) { + /* It is very unlikely, but we might + * still need to write out the + * bad block log - better give it + * a chance*/ + md_check_recovery(conf->mddev); + } + md_wait_for_blocked_rdev(rdev, conf->mddev); + } else { + /* Acknowledged bad block - skip the write */ + rdev_dec_pending(rdev, conf->mddev); + rdev = NULL; + } + } + if (rdev) { if (s->syncing || s->expanding || s->expanded) md_sync_acct(rdev->bdev, STRIPE_SECTORS); @@ -3317,7 +3347,6 @@ finish: ops_run_io(sh, &s); - if (s.dec_preread_active) { /* We delay this until after ops_run_io so that if make_request * is waiting on a flush, it won't continue until the writes -- cgit v1.2.3 From b84db560ead5417b5594349512baf8837959df4f Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:23 +1000 Subject: md/raid5: Clear bad blocks on successful write. On a successful write to a known bad block, flag the sh so that raid5d can remove the known bad block from the list. 
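What the clearing step amounts to, shown on a flat (start, len) table (a simplification: the real bad-block list packs entries into 64-bit words and must also split partially covered ranges, which this sketch omits):

    struct bb {                 /* one simplified bad-block entry */
        long long start;
        int len;
    };

    /* drop every entry that the successful write fully covered;
     * returns the new entry count */
    int clear_badblocks(struct bb *tbl, int count, long long s, int sectors)
    {
        int i, j = 0;

        for (i = 0; i < count; i++) {
            if (tbl[i].start >= s &&
                tbl[i].start + tbl[i].len <= s + sectors)
                continue;       /* rewritten with good data: drop it */
            tbl[j++] = tbl[i];  /* keep everything else */
        }
        return j;
    }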
Signed-off-by: NeilBrown --- drivers/md/raid5.c | 19 ++++++++++++++++++- drivers/md/raid5.h | 1 + 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3fa3f20dc5f5..dbae459fb02d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1675,6 +1675,8 @@ static void raid5_end_write_request(struct bio *bi, int error) raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks, i; int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); + sector_t first_bad; + int bad_sectors; for (i=0 ; idev[i].req) @@ -1691,7 +1693,9 @@ static void raid5_end_write_request(struct bio *bi, int error) if (!uptodate) { set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags); set_bit(R5_WriteError, &sh->dev[i].flags); - } + } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS, + &first_bad, &bad_sectors)) + set_bit(R5_MadeGood, &sh->dev[i].flags); rdev_dec_pending(conf->disks[i].rdev, conf->mddev); @@ -3078,6 +3082,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } else clear_bit(R5_WriteError, &dev->flags); } + if (test_bit(R5_MadeGood, &dev->flags)) { + if (!test_bit(Faulty, &rdev->flags)) { + s->handle_bad_blocks = 1; + atomic_inc(&rdev->nr_pending); + } else + clear_bit(R5_MadeGood, &dev->flags); + } if (!test_bit(R5_Insync, &dev->flags)) { /* The ReadError flag will just be confusing now */ clear_bit(R5_ReadError, &dev->flags); @@ -3340,6 +3351,12 @@ finish: md_error(conf->mddev, rdev); rdev_dec_pending(rdev, conf->mddev); } + if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { + rdev = conf->disks[i].rdev; + rdev_clear_badblocks(rdev, sh->sector, + STRIPE_SECTORS); + rdev_dec_pending(rdev, conf->mddev); + } } if (s.ops_request) diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 8620cb67ae39..11b9566184b2 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -274,6 +274,7 @@ struct stripe_head_state { #define R5_Wantdrain 13 /* dev->towrite needs to be drained */ #define R5_WantFUA 14 /* Write should be FUA */ #define R5_WriteError 15 /* got a write error - need to record it */ +#define R5_MadeGood 16 /* A bad block has been fixed by writing to it*/ /* * Write method */ -- cgit v1.2.3 From 1294b9c973251a5e68b62c9b40dd914517bda675 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:23 +1000 Subject: md/raid10: simplify/reindent some loops. When a loop ends with a large if, it can be neater to change the if to invert the condition and just 'continue'. Then the body of the if can be indented to a lower level. 
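The transformation itself, on a toy loop (dev_ok() and do_work() are placeholders):

    static int  dev_ok(int i)  { return i & 1; }   /* placeholder test */
    static void do_work(int i) { (void)i; }        /* placeholder body */

    void walk(int n)
    {
        int i;

        /* before: the real work sits one indentation level deep */
        for (i = 0; i < n; i++) {
            if (dev_ok(i)) {
                do_work(i);
            }
        }

        /* after: invert the guard and 'continue'; the body un-indents */
        for (i = 0; i < n; i++) {
            if (!dev_ok(i))
                continue;
            do_work(i);
        }
    }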
Signed-off-by: NeilBrown --- drivers/md/raid10.c | 127 +++++++++++++++++++++++++++------------------------- 1 file changed, 65 insertions(+), 62 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index fe6692e62215..c489b5c6ed6d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1533,80 +1533,83 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) sl--; d = r10_bio->devs[sl].devnum; rdev = rcu_dereference(conf->mirrors[d].rdev); - if (rdev && - test_bit(In_sync, &rdev->flags)) { - atomic_inc(&rdev->nr_pending); - rcu_read_unlock(); - if (sync_page_io(rdev, - r10_bio->devs[sl].addr + - sect, - s<<9, conf->tmppage, WRITE, false) - == 0) { - /* Well, this device is dead */ - printk(KERN_NOTICE - "md/raid10:%s: read correction " - "write failed" - " (%d sectors at %llu on %s)\n", - mdname(mddev), s, - (unsigned long long)( - sect + rdev->data_offset), - bdevname(rdev->bdev, b)); - printk(KERN_NOTICE "md/raid10:%s: %s: failing " - "drive\n", - mdname(mddev), - bdevname(rdev->bdev, b)); - md_error(mddev, rdev); - } - rdev_dec_pending(rdev, mddev); - rcu_read_lock(); + if (!rdev || + !test_bit(In_sync, &rdev->flags)) + continue; + + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + if (sync_page_io(rdev, + r10_bio->devs[sl].addr + + sect, + s<<9, conf->tmppage, WRITE, false) + == 0) { + /* Well, this device is dead */ + printk(KERN_NOTICE + "md/raid10:%s: read correction " + "write failed" + " (%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)( + sect + rdev->data_offset), + bdevname(rdev->bdev, b)); + printk(KERN_NOTICE "md/raid10:%s: %s: failing " + "drive\n", + mdname(mddev), + bdevname(rdev->bdev, b)); + md_error(mddev, rdev); } + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); } sl = start; while (sl != r10_bio->read_slot) { + char b[BDEVNAME_SIZE]; if (sl==0) sl = conf->copies; sl--; d = r10_bio->devs[sl].devnum; rdev = rcu_dereference(conf->mirrors[d].rdev); - if (rdev && - test_bit(In_sync, &rdev->flags)) { - char b[BDEVNAME_SIZE]; - atomic_inc(&rdev->nr_pending); - rcu_read_unlock(); - if (sync_page_io(rdev, - r10_bio->devs[sl].addr + - sect, - s<<9, conf->tmppage, - READ, false) == 0) { - /* Well, this device is dead */ - printk(KERN_NOTICE - "md/raid10:%s: unable to read back " - "corrected sectors" - " (%d sectors at %llu on %s)\n", - mdname(mddev), s, - (unsigned long long)( - sect + rdev->data_offset), - bdevname(rdev->bdev, b)); - printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n", - mdname(mddev), - bdevname(rdev->bdev, b)); - - md_error(mddev, rdev); - } else { - printk(KERN_INFO - "md/raid10:%s: read error corrected" - " (%d sectors at %llu on %s)\n", - mdname(mddev), s, - (unsigned long long)( - sect + rdev->data_offset), - bdevname(rdev->bdev, b)); - atomic_add(s, &rdev->corrected_errors); - } + if (!rdev || + !test_bit(In_sync, &rdev->flags)) + continue; - rdev_dec_pending(rdev, mddev); - rcu_read_lock(); + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + if (sync_page_io(rdev, + r10_bio->devs[sl].addr + + sect, + s<<9, conf->tmppage, + READ, false) == 0) { + /* Well, this device is dead */ + printk(KERN_NOTICE + "md/raid10:%s: unable to read back " + "corrected sectors" + " (%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)( + sect + rdev->data_offset), + bdevname(rdev->bdev, b)); + printk(KERN_NOTICE "md/raid10:%s: %s: failing " + "drive\n", + mdname(mddev), + bdevname(rdev->bdev, b)); + + md_error(mddev, rdev); + } else { + 
printk(KERN_INFO + "md/raid10:%s: read error corrected" + " (%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)( + sect + rdev->data_offset), + bdevname(rdev->bdev, b)); + atomic_add(s, &rdev->corrected_errors); } + + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); } rcu_read_unlock(); -- cgit v1.2.3 From 560f8e5532d63a314271bfb99d3d1d53c938ed14 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:23 +1000 Subject: md/raid10: Split handle_read_error out from raid10d. raid10d() is too big and is about to get bigger, so split handle_read_error() out as a separate function. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 123 ++++++++++++++++++++++++++++------------------------ 1 file changed, 66 insertions(+), 57 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c489b5c6ed6d..f1b749c21717 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1618,21 +1618,81 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) } } +static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) +{ + int slot = r10_bio->read_slot; + int mirror = r10_bio->devs[slot].devnum; + struct bio *bio; + conf_t *conf = mddev->private; + mdk_rdev_t *rdev; + char b[BDEVNAME_SIZE]; + unsigned long do_sync; + + /* we got a read error. Maybe the drive is bad. Maybe just + * the block and we can fix it. + * We freeze all other IO, and try reading the block from + * other devices. When we find one, we re-write + * and check it that fixes the read error. + * This is all done synchronously while the array is + * frozen. + */ + if (mddev->ro == 0) { + freeze_array(conf); + fix_read_error(conf, mddev, r10_bio); + unfreeze_array(conf); + } + rdev_dec_pending(conf->mirrors[mirror].rdev, mddev); + + bio = r10_bio->devs[slot].bio; + r10_bio->devs[slot].bio = + mddev->ro ? IO_BLOCKED : NULL; + mirror = read_balance(conf, r10_bio); + if (mirror == -1) { + printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" + " read error for block %llu\n", + mdname(mddev), + bdevname(bio->bi_bdev, b), + (unsigned long long)r10_bio->sector); + raid_end_bio_io(r10_bio); + bio_put(bio); + return; + } + + do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); + bio_put(bio); + slot = r10_bio->read_slot; + rdev = conf->mirrors[mirror].rdev; + printk_ratelimited( + KERN_ERR + "md/raid10:%s: %s: redirecting" + "sector %llu to another mirror\n", + mdname(mddev), + bdevname(rdev->bdev, b), + (unsigned long long)r10_bio->sector); + bio = bio_clone_mddev(r10_bio->master_bio, + GFP_NOIO, mddev); + r10_bio->devs[slot].bio = bio; + bio->bi_sector = r10_bio->devs[slot].addr + + rdev->data_offset; + bio->bi_bdev = rdev->bdev; + bio->bi_rw = READ | do_sync; + bio->bi_private = r10_bio; + bio->bi_end_io = raid10_end_read_request; + generic_make_request(bio); +} + static void raid10d(mddev_t *mddev) { r10bio_t *r10_bio; - struct bio *bio; unsigned long flags; conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; - mdk_rdev_t *rdev; struct blk_plug plug; md_check_recovery(mddev); blk_start_plug(&plug); for (;;) { - char b[BDEVNAME_SIZE]; flush_pending_writes(conf); @@ -1652,60 +1712,9 @@ static void raid10d(mddev_t *mddev) sync_request_write(mddev, r10_bio); else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) recovery_request_write(mddev, r10_bio); - else { - int slot = r10_bio->read_slot; - int mirror = r10_bio->devs[slot].devnum; - /* we got a read error. Maybe the drive is bad. Maybe just - * the block and we can fix it. 
- * We freeze all other IO, and try reading the block from - * other devices. When we find one, we re-write - * and check it that fixes the read error. - * This is all done synchronously while the array is - * frozen. - */ - if (mddev->ro == 0) { - freeze_array(conf); - fix_read_error(conf, mddev, r10_bio); - unfreeze_array(conf); - } - rdev_dec_pending(conf->mirrors[mirror].rdev, mddev); - - bio = r10_bio->devs[slot].bio; - r10_bio->devs[slot].bio = - mddev->ro ? IO_BLOCKED : NULL; - mirror = read_balance(conf, r10_bio); - if (mirror == -1) { - printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" - " read error for block %llu\n", - mdname(mddev), - bdevname(bio->bi_bdev,b), - (unsigned long long)r10_bio->sector); - raid_end_bio_io(r10_bio); - bio_put(bio); - } else { - const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); - bio_put(bio); - slot = r10_bio->read_slot; - rdev = conf->mirrors[mirror].rdev; - printk_ratelimited( - KERN_ERR - "md/raid10:%s: %s: redirecting" - "sector %llu to another mirror\n", - mdname(mddev), - bdevname(rdev->bdev, b), - (unsigned long long)r10_bio->sector); - bio = bio_clone_mddev(r10_bio->master_bio, - GFP_NOIO, mddev); - r10_bio->devs[slot].bio = bio; - bio->bi_sector = r10_bio->devs[slot].addr - + rdev->data_offset; - bio->bi_bdev = rdev->bdev; - bio->bi_rw = READ | do_sync; - bio->bi_private = r10_bio; - bio->bi_end_io = raid10_end_read_request; - generic_make_request(bio); - } - } + else + handle_read_error(mddev, r10_bio); + cond_resched(); if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) md_check_recovery(mddev); -- cgit v1.2.3 From … Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:23 +1000 Subject: md/raid10: avoid reading from known bad blocks - part 1 This patch just covers the basic read path: 1/ read_balance needs to check for badblocks, and return not only the chosen slot, but also how many good blocks are available there. 2/ read submission must be ready to issue multiple reads to different devices as different bad blocks on different devices could mean that a single large read cannot be served by any one device, but can still be served by the array. This requires keeping count of the number of outstanding requests per bio. This count is stored in 'bi_phys_segments'. On read error we currently just fail the request if another target cannot handle the whole request. Next patch refines that a bit. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 141 ++++++++++++++++++++++++++++++++++++++++++++++------ drivers/md/raid10.h | 4 ++ 2 files changed, 129 insertions(+), 16 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index f1b749c21717..872bf948f33a 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -191,12 +191,6 @@ static void free_r10bio(r10bio_t *r10_bio) { conf_t *conf = r10_bio->mddev->private; - /* - * Wake up any possible resync thread that waits for the device - * to go idle. - */ - allow_barrier(conf); - put_all_bios(conf, r10_bio); mempool_free(r10_bio, conf->r10bio_pool); } @@ -235,9 +229,27 @@ static void reschedule_retry(r10bio_t *r10_bio) static void raid_end_bio_io(r10bio_t *r10_bio) { struct bio *bio = r10_bio->master_bio; + int done; + conf_t *conf = r10_bio->mddev->private; - bio_endio(bio, - test_bit(R10BIO_Uptodate, &r10_bio->state) ?
0 : -EIO); + if (bio->bi_phys_segments) { + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + bio->bi_phys_segments--; + done = (bio->bi_phys_segments == 0); + spin_unlock_irqrestore(&conf->device_lock, flags); + } else + done = 1; + if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) + clear_bit(BIO_UPTODATE, &bio->bi_flags); + if (done) { + bio_endio(bio, 0); + /* + * Wake up any possible resync thread that waits for the device + * to go idle. + */ + allow_barrier(conf); + } free_r10bio(r10_bio); } @@ -307,6 +319,7 @@ static void raid10_end_read_request(struct bio *bio, int error) mdname(conf->mddev), bdevname(conf->mirrors[dev].rdev->bdev, b), (unsigned long long)r10_bio->sector); + set_bit(R10BIO_ReadError, &r10_bio->state); reschedule_retry(r10_bio); } } @@ -505,11 +518,12 @@ static int raid10_mergeable_bvec(struct request_queue *q, * FIXME: possibly should rethink readbalancing and do it differently * depending on near_copies / far_copies geometry. */ -static int read_balance(conf_t *conf, r10bio_t *r10_bio) +static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors) { const sector_t this_sector = r10_bio->sector; int disk, slot; - const int sectors = r10_bio->sectors; + int sectors = r10_bio->sectors; + int best_good_sectors; sector_t new_distance, best_dist; mdk_rdev_t *rdev; int do_balance; @@ -518,8 +532,10 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) raid10_find_phys(conf, r10_bio); rcu_read_lock(); retry: + sectors = r10_bio->sectors; best_slot = -1; best_dist = MaxSector; + best_good_sectors = 0; do_balance = 1; /* * Check if we can balance. We can balance on the whole @@ -532,6 +548,10 @@ retry: do_balance = 0; for (slot = 0; slot < conf->copies ; slot++) { + sector_t first_bad; + int bad_sectors; + sector_t dev_sector; + if (r10_bio->devs[slot].bio == IO_BLOCKED) continue; disk = r10_bio->devs[slot].devnum; @@ -541,6 +561,37 @@ retry: if (!test_bit(In_sync, &rdev->flags)) continue; + dev_sector = r10_bio->devs[slot].addr; + if (is_badblock(rdev, dev_sector, sectors, + &first_bad, &bad_sectors)) { + if (best_dist < MaxSector) + /* Already have a better slot */ + continue; + if (first_bad <= dev_sector) { + /* Cannot read here. If this is the + * 'primary' device, then we must not read + * beyond 'bad_sectors' from another device. + */ + bad_sectors -= (dev_sector - first_bad); + if (!do_balance && sectors > bad_sectors) + sectors = bad_sectors; + if (best_good_sectors > sectors) + best_good_sectors = sectors; + } else { + sector_t good_sectors = + first_bad - dev_sector; + if (good_sectors > best_good_sectors) { + best_good_sectors = good_sectors; + best_slot = slot; + } + if (!do_balance) + /* Must read from here */ + break; + } + continue; + } else + best_good_sectors = sectors; + if (!do_balance) break; @@ -582,6 +633,7 @@ retry: } else disk = -1; rcu_read_unlock(); + *max_sectors = best_good_sectors; return disk; } @@ -829,12 +881,27 @@ static int make_request(mddev_t *mddev, struct bio * bio) r10_bio->sector = bio->bi_sector; r10_bio->state = 0; + /* We might need to issue multiple reads to different + * devices if there are bad blocks around, so we keep + * track of the number of reads in bio->bi_phys_segments. + * If this is 0, there is only one r10_bio and no locking + * will be needed when the request completes. If it is + * non-zero, then it is the number of not-completed requests. 
+ */ + bio->bi_phys_segments = 0; + clear_bit(BIO_SEG_VALID, &bio->bi_flags); + if (rw == READ) { /* * read balancing logic: */ - int disk = read_balance(conf, r10_bio); - int slot = r10_bio->read_slot; + int max_sectors; + int disk; + int slot; + +read_again: + disk = read_balance(conf, r10_bio, &max_sectors); + slot = r10_bio->read_slot; if (disk < 0) { raid_end_bio_io(r10_bio); return 0; @@ -842,6 +909,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) mirror = conf->mirrors + disk; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector, + max_sectors); r10_bio->devs[slot].bio = read_bio; @@ -852,7 +921,39 @@ read_bio->bi_rw = READ | do_sync; read_bio->bi_private = r10_bio; - generic_make_request(read_bio); + if (max_sectors < r10_bio->sectors) { + /* Could not read all from this device, so we will + * need another r10_bio. + */ + int sectors_handled; + + sectors_handled = (r10_bio->sector + max_sectors + - bio->bi_sector); + r10_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (bio->bi_phys_segments == 0) + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + /* Cannot call generic_make_request directly + * as that will be queued in __generic_make_request + * and subsequent mempool_alloc might block + * waiting for it. So hand bio over to raid10d. + */ + reschedule_retry(r10_bio); + + r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); + + r10_bio->master_bio = bio; + r10_bio->sectors = ((bio->bi_size >> 9) + - sectors_handled); + r10_bio->state = 0; + r10_bio->mddev = mddev; + r10_bio->sector = bio->bi_sector + sectors_handled; + goto read_again; + } else + generic_make_request(read_bio); return 0; } @@ -1627,6 +1728,7 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) mdk_rdev_t *rdev; char b[BDEVNAME_SIZE]; unsigned long do_sync; + int max_sectors; /* we got a read error. Maybe the drive is bad. Maybe just * the block and we can fix it. @@ -1646,8 +1748,8 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) bio = r10_bio->devs[slot].bio; r10_bio->devs[slot].bio = mddev->ro ? IO_BLOCKED : NULL; - mirror = read_balance(conf, r10_bio); - if (mirror == -1) { + mirror = read_balance(conf, r10_bio, &max_sectors); + if (mirror == -1 || max_sectors < r10_bio->sectors) { printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" " read error for block %llu\n", mdname(mddev), @@ -1712,8 +1814,15 @@ static void raid10d(mddev_t *mddev) sync_request_write(mddev, r10_bio); else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) recovery_request_write(mddev, r10_bio); - else + else if (test_bit(R10BIO_ReadError, &r10_bio->state)) handle_read_error(mddev, r10_bio); + else { + /* just a partial read to be scheduled from a + * separate context + */ + int slot = r10_bio->read_slot; + generic_make_request(r10_bio->devs[slot].bio); + } cond_resched(); if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) md_check_recovery(mddev); -- cgit v1.2.3 From … Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:23 +1000 Subject: md/raid10: avoid reading from known bad blocks - part 2 When redirecting a read error to a different device, we must again avoid bad blocks and possibly split the request.
Spin_lock typo fixed thanks to Dan Carpenter Signed-off-by: NeilBrown --- drivers/md/raid10.c | 45 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 872bf948f33a..37801d68a4cd 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1746,14 +1746,15 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) rdev_dec_pending(conf->mirrors[mirror].rdev, mddev); bio = r10_bio->devs[slot].bio; + bdevname(bio->bi_bdev, b); r10_bio->devs[slot].bio = mddev->ro ? IO_BLOCKED : NULL; +read_more: mirror = read_balance(conf, r10_bio, &max_sectors); - if (mirror == -1 || max_sectors < r10_bio->sectors) { + if (mirror == -1) { printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" " read error for block %llu\n", - mdname(mddev), - bdevname(bio->bi_bdev, b), + mdname(mddev), b, (unsigned long long)r10_bio->sector); raid_end_bio_io(r10_bio); bio_put(bio); @@ -1761,7 +1762,8 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) } do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); - bio_put(bio); + if (bio) + bio_put(bio); slot = r10_bio->read_slot; rdev = conf->mirrors[mirror].rdev; printk_ratelimited( @@ -1773,6 +1775,9 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) (unsigned long long)r10_bio->sector); bio = bio_clone_mddev(r10_bio->master_bio, GFP_NOIO, mddev); + md_trim_bio(bio, + r10_bio->sector - bio->bi_sector, + max_sectors); r10_bio->devs[slot].bio = bio; bio->bi_sector = r10_bio->devs[slot].addr + rdev->data_offset; @@ -1780,7 +1785,37 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) bio->bi_rw = READ | do_sync; bio->bi_private = r10_bio; bio->bi_end_io = raid10_end_read_request; - generic_make_request(bio); + if (max_sectors < r10_bio->sectors) { + /* Drat - have to split this up more */ + struct bio *mbio = r10_bio->master_bio; + int sectors_handled = + r10_bio->sector + max_sectors + - mbio->bi_sector; + r10_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (mbio->bi_phys_segments == 0) + mbio->bi_phys_segments = 2; + else + mbio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + generic_make_request(bio); + bio = NULL; + + r10_bio = mempool_alloc(conf->r10bio_pool, + GFP_NOIO); + r10_bio->master_bio = mbio; + r10_bio->sectors = (mbio->bi_size >> 9) + - sectors_handled; + r10_bio->state = 0; + set_bit(R10BIO_ReadError, + &r10_bio->state); + r10_bio->mddev = mddev; + r10_bio->sector = mbio->bi_sector + + sectors_handled; + + goto read_more; + } else + generic_make_request(bio); } static void raid10d(mddev_t *mddev) -- cgit v1.2.3 From 8dbed5cebdf6796bf2618457b3653cf820934366 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:24 +1000 Subject: md/raid10 - avoid reading from known bad blocks - part 3 When attempting to repair a read error, don't read from devices with a known bad block. As we are only reading PAGE_SIZE blocks, we don't try to narrow down to smaller regions in the hope that only part of this page is bad - it isn't worth the effort. 
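Before the diff, it may help to see in one place the primitive that all three of these parts lean on: given a device's bad-block list, decide how much of [sector, sector+len) is safely readable. What follows is a simplified, hedged userspace sketch over a plain sorted array; the kernel packs its table differently and tracks acknowledgement state, none of which is modelled here, and all names are illustrative:

#include <stdio.h>

typedef unsigned long long sector_t;

struct badrange { sector_t start; int len; };

/* Report whether [sector, sector+len) overlaps a bad range; on overlap,
 * return where the trouble starts and how long it is ('bb' sorted by start). */
static int is_badblock(const struct badrange *bb, int nr,
		       sector_t sector, int len,
		       sector_t *first_bad, int *bad_sectors)
{
	for (int i = 0; i < nr; i++) {
		if (bb[i].start >= sector + len)
			break;		/* sorted: no later range can overlap */
		if (bb[i].start + bb[i].len <= sector)
			continue;	/* ends before our window */
		*first_bad = bb[i].start;
		*bad_sectors = bb[i].len;
		return 1;
	}
	return 0;
}

/* The two cases read_balance distinguishes: a bad block at or before
 * 'sector' means nothing is readable here; one further on trims the
 * request to the good prefix.  Returns the usable sector count. */
static int good_sectors_at(const struct badrange *bb, int nr,
			   sector_t sector, int want)
{
	sector_t first_bad;
	int bad_sectors;

	if (!is_badblock(bb, nr, sector, want, &first_bad, &bad_sectors))
		return want;
	if (first_bad <= sector)
		return 0;		/* cannot read here at all */
	return (int)(first_bad - sector);
}

int main(void)
{
	struct badrange bb[] = { { 100, 8 } };

	printf("%d\n", good_sectors_at(bb, 1, 96, 32));		/* 4: trimmed at 100 */
	printf("%d\n", good_sectors_at(bb, 1, 104, 32));	/* 0: starts inside */
	printf("%d\n", good_sectors_at(bb, 1, 200, 32));	/* 32: clean */
	return 0;
}

When the usable count comes back smaller than the request, the caller trims the bio to that prefix and accounts for the remainder in bio->bi_phys_segments, issuing one r10_bio per piece; that is the counting scheme parts 1 and 2 introduced above.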
Signed-off-by: NeilBrown --- drivers/md/raid10.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 37801d68a4cd..a5ecea2672b5 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1594,10 +1594,15 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) rcu_read_lock(); do { + sector_t first_bad; + int bad_sectors; + d = r10_bio->devs[sl].devnum; rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev && - test_bit(In_sync, &rdev->flags)) { + test_bit(In_sync, &rdev->flags) && + is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, + &first_bad, &bad_sectors) == 0) { atomic_inc(&rdev->nr_pending); rcu_read_unlock(); success = sync_page_io(rdev, -- cgit v1.2.3 From 40c356ce5ad1a6be817825e1da1bc7494349cc6d Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:24 +1000 Subject: md/raid10: avoid reading known bad blocks during resync/recovery. During resync/recovery limit the size of the request to avoid reading into a bad block that does not start at-or-before the current read address. Similarly if there is a bad block at this address, don't allow the current request to extend beyond the end of that bad block. Now that we don't ever read from known bad blocks, it is safe to allow devices with those blocks into the array. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 44 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 9 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a5ecea2672b5..5f0355832b46 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1203,9 +1203,6 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) int first = 0; int last = conf->raid_disks - 1; - if (rdev->badblocks.count) - return -EINVAL; - if (mddev->recovery_cp < MaxSector) /* only hot-add to in-sync arrays, as recovery is * very different from resync @@ -1927,7 +1924,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int i; int max_sync; sector_t sync_blocks; - sector_t sectors_skipped = 0; int chunks_skipped = 0; @@ -2070,10 +2066,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, for (j=0; j<conf->copies;j++) { int d = r10_bio->devs[j].devnum; + mdk_rdev_t *rdev; + sector_t sector, first_bad; + int bad_sectors; if (!conf->mirrors[d].rdev || !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) continue; /* This is where we read from */ + rdev = conf->mirrors[d].rdev; + sector = r10_bio->devs[j].addr; + + if (is_badblock(rdev, sector, max_sync, + &first_bad, &bad_sectors)) { + if (first_bad > sector) + max_sync = first_bad - sector; + else { + bad_sectors -= (sector + - first_bad); + if (max_sync > bad_sectors) + max_sync = bad_sectors; + continue; + } + } bio = r10_bio->devs[0].bio; bio->bi_next = biolist; biolist = bio; @@ -2160,12 +2174,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, for (i=0; i<conf->copies; i++) { int d = r10_bio->devs[i].devnum; + sector_t first_bad, sector; + int bad_sectors; + bio = r10_bio->devs[i].bio; bio->bi_end_io = NULL; clear_bit(BIO_UPTODATE, &bio->bi_flags); if (conf->mirrors[d].rdev == NULL || test_bit(Faulty, &conf->mirrors[d].rdev->flags)) continue; + sector = r10_bio->devs[i].addr; + if (is_badblock(conf->mirrors[d].rdev, + sector, max_sync, + &first_bad, &bad_sectors)) { + if (first_bad > sector) + max_sync = first_bad - sector; + else { + bad_sectors -= (sector - first_bad); + if (max_sync >
bad_sectors) + max_sync = bad_sectors; + continue; + } + } atomic_inc(&conf->mirrors[d].rdev->nr_pending); atomic_inc(&r10_bio->remaining); bio->bi_next = biolist; biolist = bio; bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; bio->bi_rw = READ; - bio->bi_sector = r10_bio->devs[i].addr + + bio->bi_sector = sector + conf->mirrors[d].rdev->data_offset; bio->bi_bdev = conf->mirrors[d].rdev->bdev; count++; @@ -2431,10 +2461,6 @@ static int run(mddev_t *mddev) list_for_each_entry(rdev, &mddev->disks, same_set) { - if (rdev->badblocks.count) { - printk(KERN_ERR "md/raid10: cannot handle bad blocks yet\n"); - goto out_free_conf; - } disk_idx = rdev->raid_disk; if (disk_idx >= conf->raid_disks || disk_idx < 0) -- cgit v1.2.3 From e875ecea266a543e643b19e44cf472f1412708f9 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:24 +1000 Subject: md/raid10 record bad blocks as needed during recovery. When recovering one or more devices, if all the good devices have bad blocks we should record a bad block on the device being rebuilt. If this fails, we need to abort the recovery. To ensure we don't think that we aborted later than we actually did, we need to move the check for MD_RECOVERY_INTR earlier in md_do_sync, in particular before mddev->curr_resync is updated. Signed-off-by: NeilBrown --- drivers/md/md.c | 9 ++++----- drivers/md/raid10.c | 40 ++++++++++++++++++++++++++++++++-------- 2 files changed, 36 insertions(+), 13 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index a6b6471da2bc..d97a62534798 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7165,11 +7165,14 @@ void md_do_sync(mddev_t *mddev) atomic_add(sectors, &mddev->recovery_active); } + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + break; + j += sectors; if (j>1) mddev->curr_resync = j; mddev->curr_mark_cnt = io_sectors; if (last_check == 0) - /* this is the earliers that rebuilt will be + /* this is the earliest that rebuild will be * visible in /proc/mdstat */ md_new_event(mddev); @@ -7178,10 +7181,6 @@ void md_do_sync(mddev_t *mddev) continue; last_check = io_sectors; - - if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) - break; - repeat: if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { /* step marks */ diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5f0355832b46..de6089926273 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2005,7 +2005,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { /* recovery...
the complicated one */ - int j, k; + int j; r10_bio = NULL; for (i=0 ; i<conf->raid_disks; i++) { @@ -2013,6 +2013,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, r10bio_t *rb2; sector_t sect; int must_sync; + int any_working; if (conf->mirrors[i].rdev == NULL || test_bit(In_sync, &conf->mirrors[i].rdev->flags)) @@ -2064,7 +2065,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, must_sync = bitmap_start_sync(mddev->bitmap, sect, &sync_blocks, still_degraded); + any_working = 0; for (j=0; j<conf->copies;j++) { + int k; int d = r10_bio->devs[j].devnum; mdk_rdev_t *rdev; sector_t sector, first_bad; @@ -2073,6 +2076,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) continue; /* This is where we read from */ + any_working = 1; rdev = conf->mirrors[d].rdev; sector = r10_bio->devs[j].addr; @@ -2121,16 +2125,35 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, break; } if (j == conf->copies) { - /* Cannot recover, so abort the recovery */ + /* Cannot recover, so abort the recovery or + * record a bad block */ put_buf(r10_bio); if (rb2) atomic_dec(&rb2->remaining); r10_bio = rb2; - if (!test_and_set_bit(MD_RECOVERY_INTR, - &mddev->recovery)) - printk(KERN_INFO "md/raid10:%s: insufficient " - "working devices for recovery.\n", - mdname(mddev)); + if (any_working) { + /* problem is that there are bad blocks + * on other device(s) + */ + int k; + for (k = 0; k < conf->copies; k++) + if (r10_bio->devs[k].devnum == i) + break; + if (!rdev_set_badblocks( + conf->mirrors[i].rdev, + r10_bio->devs[k].addr, + max_sync, 0)) + any_working = 0; + } + if (!any_working) { + if (!test_and_set_bit(MD_RECOVERY_INTR, + &mddev->recovery)) + printk(KERN_INFO "md/raid10:%s: insufficient " + "working devices for recovery.\n", + mdname(mddev)); + conf->mirrors[i].recovery_disabled + = mddev->recovery_disabled; + } break; } } @@ -2290,7 +2313,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, return sectors_skipped + nr_sectors; giveup: /* There is nowhere to write, so all non-sync - * drives must be failed, so try the next chunk... + * drives must be failed or in resync, all drives + * have a bad block, so try the next chunk... */ if (sector_nr + max_sync < max_sector) max_sector = sector_nr + max_sync; -- cgit v1.2.3 From d4432c23be957ff061f7b23fd60e8506cb472a55 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:24 +1000 Subject: md/raid10: avoid writing to known bad blocks on known bad drives. Writing to known bad blocks on drives that have seen a write error is asking for trouble. So try to avoid these blocks. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 93 insertions(+), 12 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index de6089926273..13077a3fd7d2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -807,6 +807,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) unsigned long flags; mdk_rdev_t *blocked_rdev; int plugged; + int sectors_handled; + int max_sectors; if (unlikely(bio->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bio); @@ -895,7 +897,6 @@ static int make_request(mddev_t *mddev, struct bio * bio) /* * read balancing logic: */ - int max_sectors; int disk; int slot; @@ -925,8 +926,6 @@ read_again: /* Could not read all from this device, so we will * need another r10_bio.
*/ - int sectors_handled; - - sectors_handled = (r10_bio->sector + max_sectors - bio->bi_sector); r10_bio->sectors = max_sectors; @@ -963,13 +962,22 @@ read_again: /* first select target devices under rcu_lock and * inc refcount on their rdev. Record them by setting * bios[x] to bio + * If there are known/acknowledged bad blocks on any device + * on which we have seen a write error, we want to avoid + * writing to those blocks. This potentially requires several + * writes to write around the bad blocks. Each set of writes + * gets its own r10_bio with a set of bios attached. The number + * of r10_bios is recorded in bio->bi_phys_segments just as with + * the read case. */ plugged = mddev_check_plugged(mddev); raid10_find_phys(conf, r10_bio); - retry_write: +retry_write: blocked_rdev = NULL; rcu_read_lock(); + max_sectors = r10_bio->sectors; + for (i = 0; i < conf->copies; i++) { int d = r10_bio->devs[i].devnum; mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); @@ -978,13 +986,55 @@ read_again: blocked_rdev = rdev; break; } - if (rdev && !test_bit(Faulty, &rdev->flags)) { - atomic_inc(&rdev->nr_pending); - r10_bio->devs[i].bio = bio; - } else { - r10_bio->devs[i].bio = NULL; + r10_bio->devs[i].bio = NULL; + if (!rdev || test_bit(Faulty, &rdev->flags)) { set_bit(R10BIO_Degraded, &r10_bio->state); + continue; + } + if (test_bit(WriteErrorSeen, &rdev->flags)) { + sector_t first_bad; + sector_t dev_sector = r10_bio->devs[i].addr; + int bad_sectors; + int is_bad; + + is_bad = is_badblock(rdev, dev_sector, + max_sectors, + &first_bad, &bad_sectors); + if (is_bad < 0) { + /* Mustn't write here until the bad block + * is acknowledged + */ + atomic_inc(&rdev->nr_pending); + set_bit(BlockedBadBlocks, &rdev->flags); + blocked_rdev = rdev; + break; + } + if (is_bad && first_bad <= dev_sector) { + /* Cannot write here at all */ + bad_sectors -= (dev_sector - first_bad); + if (bad_sectors < max_sectors) + /* Mustn't write more than bad_sectors + * to other devices yet + */ + max_sectors = bad_sectors; + /* We don't set R10BIO_Degraded as that + * only applies if the disk is missing, + * so it might be re-added, and we want to + * know to recover this chunk. + * In this case the device is here, and the + * fact that this chunk is not in-sync is + * recorded in the bad block log. + */ + continue; + } + if (is_bad) { + int good_sectors = first_bad - dev_sector; + if (good_sectors < max_sectors) + max_sectors = good_sectors; + } } + r10_bio->devs[i].bio = bio; + atomic_inc(&rdev->nr_pending); } rcu_read_unlock(); @@ -1004,8 +1054,22 @@ read_again: goto retry_write; } + if (max_sectors < r10_bio->sectors) { + /* We are splitting this into multiple parts, so + * we need to prepare for allocating another r10_bio.
+ */ + r10_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (bio->bi_phys_segments == 0) + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + } + sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; + atomic_set(&r10_bio->remaining, 1); - bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0); + bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); for (i = 0; i < conf->copies; i++) { struct bio *mbio; @@ -1014,10 +1078,12 @@ read_again: continue; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); r10_bio->devs[i].bio = mbio; - mbio->bi_sector = r10_bio->devs[i].addr+ - conf->mirrors[d].rdev->data_offset; + mbio->bi_sector = (r10_bio->devs[i].addr+ + conf->mirrors[d].rdev->data_offset); mbio->bi_bdev = conf->mirrors[d].rdev->bdev; mbio->bi_end_io = raid10_end_write_request; mbio->bi_rw = WRITE | do_sync | do_fua; @@ -1042,6 +1108,21 @@ read_again: /* In case raid10d snuck in to freeze_array */ wake_up(&conf->wait_barrier); + if (sectors_handled < (bio->bi_size >> 9)) { + /* We need another r1_bio. It has already been counted + * in bio->bi_phys_segments. + */ + r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); + + r10_bio->master_bio = bio; + r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled; + + r10_bio->mddev = mddev; + r10_bio->sector = bio->bi_sector + sectors_handled; + r10_bio->state = 0; + goto retry_write; + } + if (do_sync || !mddev->bitmap || !plugged) md_wakeup_thread(mddev->thread); return 0; -- cgit v1.2.3 From 749c55e942d91cb27045fe2eb313aa5afe68ae0b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:24 +1000 Subject: md/raid10: clear bad-block record when write succeeds. If we succeed in writing to a block that was recorded as being bad, we clear the bad-block record. This requires some delayed handling as the bad-block-list update has to happen in process-context. 
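The patch below smuggles a third state through the struct bio * slots: alongside a real bio and NULL, tiny sentinel values mark "blocked" and "this write made a bad block good". Here is a hedged userspace sketch of that pointer-tagging pattern; the struct bio is a stub, and only the macro shapes mirror the patch:

#include <stdio.h>

struct bio { int dummy; };

/* Sentinels: the pattern relies on the first few small values never
 * being valid object addresses.  The macro names mirror the patch. */
#define IO_BLOCKED   ((struct bio *)1)
#define IO_MADE_GOOD ((struct bio *)2)
#define BIO_SPECIAL(b) ((unsigned long)(b) <= 2)

static void put_slot(struct bio **slot)
{
	/* Only release genuine bios; NULL and the sentinels are skipped. */
	if (!BIO_SPECIAL(*slot))
		printf("releasing real bio %p\n", (void *)*slot);
	*slot = NULL;
}

int main(void)
{
	struct bio real;
	struct bio *slots[3] = { &real, IO_MADE_GOOD, NULL };

	for (int i = 0; i < 3; i++) {
		if (slots[i] == IO_MADE_GOOD)
			printf("slot %d: clear bad-block record later\n", i);
		put_slot(&slots[i]);
	}
	return 0;
}

The one rule the pattern imposes is that every consumer must test BIO_SPECIAL() before dereferencing or releasing a slot, which is exactly what the put_all_bios() hunk in the diff below enforces.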
Signed-off-by: NeilBrown --- drivers/md/raid10.c | 101 +++++++++++++++++++++++++++++++++++++++++++++------- drivers/md/raid10.h | 11 ++++++ 2 files changed, 100 insertions(+), 12 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 13077a3fd7d2..39b2058845f5 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -181,7 +181,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) for (i = 0; i < conf->copies; i++) { struct bio **bio = & r10_bio->devs[i].bio; - if (*bio && *bio != IO_BLOCKED) + if (!BIO_SPECIAL(*bio)) bio_put(*bio); *bio = NULL; } @@ -267,7 +267,8 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio) /* * Find the disk number which triggered given bio */ -static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, struct bio *bio) +static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, + struct bio *bio, int *slotp) { int slot; @@ -278,6 +279,8 @@ static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, struct bio *bio) BUG_ON(slot == conf->copies); update_head_pos(slot, r10_bio); + if (slotp) + *slotp = slot; return r10_bio->devs[slot].devnum; } @@ -329,9 +332,11 @@ static void raid10_end_write_request(struct bio *bio, int error) int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r10bio_t *r10_bio = bio->bi_private; int dev; + int dec_rdev = 1; conf_t *conf = r10_bio->mddev->private; + int slot; - dev = find_bio_disk(conf, r10_bio, bio); + dev = find_bio_disk(conf, r10_bio, bio, &slot); /* * this branch is our 'one mirror IO has finished' event handler: @@ -340,7 +345,7 @@ static void raid10_end_write_request(struct bio *bio, int error) md_error(r10_bio->mddev, conf->mirrors[dev].rdev); /* an I/O failed, we can't clear the bitmap */ set_bit(R10BIO_Degraded, &r10_bio->state); - } else + } else { /* * Set R10BIO_Uptodate in our master bio, so that * we will return a good error code for to the higher @@ -350,8 +355,23 @@ static void raid10_end_write_request(struct bio *bio, int error) * user-side. So if something waits for IO, then it will * wait for the 'master' bio. */ + sector_t first_bad; + int bad_sectors; + set_bit(R10BIO_Uptodate, &r10_bio->state); + /* Maybe we can clear some bad blocks. 
*/ + if (is_badblock(conf->mirrors[dev].rdev, + r10_bio->devs[slot].addr, + r10_bio->sectors, + &first_bad, &bad_sectors)) { + bio_put(bio); + r10_bio->devs[slot].bio = IO_MADE_GOOD; + dec_rdev = 0; + set_bit(R10BIO_MadeGood, &r10_bio->state); + } + } + /* * * Let's see if all mirrored write operations have finished @@ -364,10 +384,13 @@ static void raid10_end_write_request(struct bio *bio, int error) !test_bit(R10BIO_Degraded, &r10_bio->state), 0); md_write_end(r10_bio->mddev); - raid_end_bio_io(r10_bio); + if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + reschedule_retry(r10_bio); + else + raid_end_bio_io(r10_bio); } - - rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); + if (dec_rdev) + rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); } @@ -1382,7 +1405,7 @@ static void end_sync_read(struct bio *bio, int error) conf_t *conf = r10_bio->mddev->private; int d; - d = find_bio_disk(conf, r10_bio, bio); + d = find_bio_disk(conf, r10_bio, bio, NULL); if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R10BIO_Uptodate, &r10_bio->state); @@ -1414,23 +1437,37 @@ static void end_sync_write(struct bio *bio, int error) mddev_t *mddev = r10_bio->mddev; conf_t *conf = mddev->private; int d; + sector_t first_bad; + int bad_sectors; + int slot; - d = find_bio_disk(conf, r10_bio, bio); + d = find_bio_disk(conf, r10_bio, bio, &slot); if (!uptodate) md_error(mddev, conf->mirrors[d].rdev); + else if (is_badblock(conf->mirrors[d].rdev, + r10_bio->devs[slot].addr, + r10_bio->sectors, + &first_bad, &bad_sectors)) + set_bit(R10BIO_MadeGood, &r10_bio->state); rdev_dec_pending(conf->mirrors[d].rdev, mddev); while (atomic_dec_and_test(&r10_bio->remaining)) { if (r10_bio->master_bio == NULL) { /* the primary of several recovery bios */ sector_t s = r10_bio->sectors; - put_buf(r10_bio); + if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + reschedule_retry(r10_bio); + else + put_buf(r10_bio); md_done_sync(mddev, s, 1); break; } else { r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio; - put_buf(r10_bio); + if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + reschedule_retry(r10_bio); + else + put_buf(r10_bio); r10_bio = r10_bio2; } } @@ -1901,6 +1938,44 @@ read_more: generic_make_request(bio); } +static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio) +{ + /* Some sort of write request has finished and it + * succeeded in writing where we thought there was a + * bad block. So forget the bad block. 
+ */ + int m; + mdk_rdev_t *rdev; + + if (test_bit(R10BIO_IsSync, &r10_bio->state) || + test_bit(R10BIO_IsRecover, &r10_bio->state)) { + for (m = 0; m < conf->copies; m++) + if (r10_bio->devs[m].bio && + test_bit(BIO_UPTODATE, + &r10_bio->devs[m].bio->bi_flags)) { + int dev = r10_bio->devs[m].devnum; + rdev = conf->mirrors[dev].rdev; + rdev_clear_badblocks( + rdev, + r10_bio->devs[m].addr, + r10_bio->sectors); + } + put_buf(r10_bio); + } else { + for (m = 0; m < conf->copies; m++) + if (r10_bio->devs[m].bio == IO_MADE_GOOD) { + int dev = r10_bio->devs[m].devnum; + rdev = conf->mirrors[dev].rdev; + rdev_clear_badblocks( + rdev, + r10_bio->devs[m].addr, + r10_bio->sectors); + rdev_dec_pending(rdev, conf->mddev); + } + raid_end_bio_io(r10_bio); + } +} + static void raid10d(mddev_t *mddev) { r10bio_t *r10_bio; @@ -1928,7 +2003,9 @@ static void raid10d(mddev_t *mddev) mddev = r10_bio->mddev; conf = mddev->private; - if (test_bit(R10BIO_IsSync, &r10_bio->state)) + if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + handle_write_completed(conf, r10_bio); + else if (test_bit(R10BIO_IsSync, &r10_bio->state)) sync_request_write(mddev, r10_bio); else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) recovery_request_write(mddev, r10_bio); diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index c646152ba4e4..d8b7f9af92d5 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -118,6 +118,13 @@ struct r10bio_s { * level, we store IO_BLOCKED in the appropriate 'bios' pointer */ #define IO_BLOCKED ((struct bio*)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. So we record + * the success by setting devs[n].bio to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) /* bits for r10bio.state */ #define R10BIO_Uptodate 0 @@ -128,4 +135,8 @@ struct r10bio_s { * so that raid10d knows what to do with them. */ #define R10BIO_ReadError 4 +/* If a write for this request means we can clear some + * known-bad-block records, we set this flag. + */ +#define R10BIO_MadeGood 5 #endif -- cgit v1.2.3 From bd870a16c5946d86126f7203db3c73b71de0a1d8 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:24 +1000 Subject: md/raid10: Handle write errors by updating badblock log. When we get a write error (in the data area, not in metadata), update the badblock log rather than failing the whole device. As the write may well be many blocks, we try writing each block individually and only log the ones which fail.
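The narrow_write_error() added below has one subtle piece: the first retry chunk must end on a bad-block-granularity boundary so every later chunk starts aligned. A small hedged sketch of just that arithmetic; block_sectors is assumed to be a power of two, as the badblocks granularity is in the kernel, and the I/O itself is stubbed out:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Split [sector, sector+total) into chunks of 'block_sectors' granularity,
 * with the first chunk trimmed so all later chunks start aligned. */
static void split_aligned(sector_t sector, int total, int block_sectors)
{
	int sectors = (int)(((sector + block_sectors)
			     & ~(sector_t)(block_sectors - 1))
			    - sector);	/* distance to next aligned boundary */

	while (total) {
		if (sectors > total)
			sectors = total;
		printf("write %d sectors at %llu\n", sectors, sector);
		total -= sectors;
		sector += sectors;
		sectors = block_sectors;	/* later chunks are full blocks */
	}
}

int main(void)
{
	/* e.g. a badblocks granularity of 8 sectors, 21-sector write at 5:
	 * chunks come out as 3@5, 8@8, 8@16, 2@24 */
	split_aligned(5, 21, 8);
	return 0;
}

Each chunk that fails then gets its own bad-block record, so a single stray sector no longer condemns the whole device.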
Signed-off-by: NeilBrown --- drivers/md/raid10.c | 133 +++++++++++++++++++++++++++++++++++++++++++++------- drivers/md/raid10.h | 1 + 2 files changed, 117 insertions(+), 17 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 39b2058845f5..909450414c67 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -327,6 +327,16 @@ static void raid10_end_read_request(struct bio *bio, int error) } } +static void close_write(r10bio_t *r10_bio) +{ + /* clear the bitmap if all writes complete successfully */ + bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, + r10_bio->sectors, + !test_bit(R10BIO_Degraded, &r10_bio->state), + 0); + md_write_end(r10_bio->mddev); +} + static void raid10_end_write_request(struct bio *bio, int error) { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); @@ -342,9 +352,9 @@ static void raid10_end_write_request(struct bio *bio, int error) * this branch is our 'one mirror IO has finished' event handler: */ if (!uptodate) { - md_error(r10_bio->mddev, conf->mirrors[dev].rdev); - /* an I/O failed, we can't clear the bitmap */ - set_bit(R10BIO_Degraded, &r10_bio->state); + set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags); + set_bit(R10BIO_WriteError, &r10_bio->state); + dec_rdev = 0; } else { /* * Set R10BIO_Uptodate in our master bio, so that @@ -378,16 +388,15 @@ static void raid10_end_write_request(struct bio *bio, int error) * already. */ if (atomic_dec_and_test(&r10_bio->remaining)) { - /* clear the bitmap if all writes complete successfully */ - bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, - r10_bio->sectors, - !test_bit(R10BIO_Degraded, &r10_bio->state), - 0); - md_write_end(r10_bio->mddev); - if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + if (test_bit(R10BIO_WriteError, &r10_bio->state)) reschedule_retry(r10_bio); - else - raid_end_bio_io(r10_bio); + else { + close_write(r10_bio); + if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + reschedule_retry(r10_bio); + else + raid_end_bio_io(r10_bio); + } } if (dec_rdev) rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); @@ -1839,6 +1848,82 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) } } +static void bi_complete(struct bio *bio, int error) +{ + complete((struct completion *)bio->bi_private); +} + +static int submit_bio_wait(int rw, struct bio *bio) +{ + struct completion event; + rw |= REQ_SYNC; + + init_completion(&event); + bio->bi_private = &event; + bio->bi_end_io = bi_complete; + submit_bio(rw, bio); + wait_for_completion(&event); + + return test_bit(BIO_UPTODATE, &bio->bi_flags); +} + +static int narrow_write_error(r10bio_t *r10_bio, int i) +{ + struct bio *bio = r10_bio->master_bio; + mddev_t *mddev = r10_bio->mddev; + conf_t *conf = mddev->private; + mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; + /* bio has the data to be written to slot 'i' where + * we just recently had a write error. + * We repeatedly clone the bio and trim down to one block, + * then try the write. Where the write fails we record + * a bad block. + * It is conceivable that the bio doesn't exactly align with + * blocks. We must handle this. + * + * We currently own a reference to the rdev. 
+ */ + + int block_sectors; + sector_t sector; + int sectors; + int sect_to_write = r10_bio->sectors; + int ok = 1; + + if (rdev->badblocks.shift < 0) + return 0; + + block_sectors = 1 << rdev->badblocks.shift; + sector = r10_bio->sector; + sectors = ((r10_bio->sector + block_sectors) + & ~(sector_t)(block_sectors - 1)) + - sector; + + while (sect_to_write) { + struct bio *wbio; + if (sectors > sect_to_write) + sectors = sect_to_write; + /* Write at 'sector' for 'sectors' */ + wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(wbio, sector - bio->bi_sector, sectors); + wbio->bi_sector = (r10_bio->devs[i].addr+ + rdev->data_offset+ + (sector - r10_bio->sector)); + wbio->bi_bdev = rdev->bdev; + if (submit_bio_wait(WRITE, wbio) == 0) + /* Failure! */ + ok = rdev_set_badblocks(rdev, sector, + sectors, 0) + && ok; + + bio_put(wbio); + sect_to_write -= sectors; + sector += sectors; + sectors = block_sectors; + } + return ok; +} + static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) { int slot = r10_bio->read_slot; @@ -1962,16 +2047,29 @@ static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio) } put_buf(r10_bio); } else { - for (m = 0; m < conf->copies; m++) - if (r10_bio->devs[m].bio == IO_MADE_GOOD) { - int dev = r10_bio->devs[m].devnum; - rdev = conf->mirrors[dev].rdev; + for (m = 0; m < conf->copies; m++) { + int dev = r10_bio->devs[m].devnum; + struct bio *bio = r10_bio->devs[m].bio; + rdev = conf->mirrors[dev].rdev; + if (bio == IO_MADE_GOOD) { rdev_clear_badblocks( rdev, r10_bio->devs[m].addr, r10_bio->sectors); rdev_dec_pending(rdev, conf->mddev); + } else if (bio != NULL && + !test_bit(BIO_UPTODATE, &bio->bi_flags)) { + if (!narrow_write_error(r10_bio, m)) { + md_error(conf->mddev, rdev); + set_bit(R10BIO_Degraded, + &r10_bio->state); + } + rdev_dec_pending(rdev, conf->mddev); } + } + if (test_bit(R10BIO_WriteError, + &r10_bio->state)) + close_write(r10_bio); raid_end_bio_io(r10_bio); } } @@ -2003,7 +2101,8 @@ static void raid10d(mddev_t *mddev) mddev = r10_bio->mddev; conf = mddev->private; - if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + if (test_bit(R10BIO_MadeGood, &r10_bio->state) || + test_bit(R10BIO_WriteError, &r10_bio->state)) handle_write_completed(conf, r10_bio); else if (test_bit(R10BIO_IsSync, &r10_bio->state)) sync_request_write(mddev, r10_bio); diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index d8b7f9af92d5..79cb52a0d4a2 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -139,4 +139,5 @@ struct r10bio_s { * known-bad-block records, we set this flag. */ #define R10BIO_MadeGood 5 +#define R10BIO_WriteError 6 #endif -- cgit v1.2.3 From f84ee364dd15af11cada1e673f94128f62db189e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:25 +1000 Subject: md/raid10: attempt to fix read errors during resync/check We already attempt to fix read errors found during normal IO and a 'repair' process. It is best to try to repair them at any time they are found, so move a test so that during sync and check a read error will be corrected by over-writing with good data. If both (all) devices have known bad blocks in the sync section we won't try to fix even though the bad blocks might not overlap. That should be considered later. Also if we hit a read error during recovery we don't try to fix it. It would only be possible to fix if there were at least three copies of data, which is not very common with RAID10. But it should still be considered later. 
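The reordering this patch describes is small but easy to misread in diff form. The sketch below models the decision table after the change, in plain userspace C with buffers standing in for the bio pages; COPIES, PAGE and the control flow are illustrative only, not the kernel's sync_request_write():

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define COPIES 3
#define PAGE 4096

/* One resync window: copy 0 is the reference.  A copy is rewritten if it
 * was unreadable, or if it differs and we are repairing (not just checking). */
static void sync_window(unsigned char data[COPIES][PAGE], bool readable[COPIES],
			bool check_only, int *mismatches)
{
	for (int i = 1; i < COPIES; i++) {
		bool rewrite = false;

		if (!readable[i]) {
			rewrite = true;	/* read error: always fix, even in check */
		} else if (memcmp(data[0], data[i], PAGE) != 0) {
			(*mismatches)++;
			if (!check_only)
				rewrite = true;
		}
		if (rewrite) {
			memcpy(data[i], data[0], PAGE);
			readable[i] = true;
			printf("rewrote copy %d\n", i);
		}
	}
}

int main(void)
{
	static unsigned char data[COPIES][PAGE];
	bool readable[COPIES] = { true, false, true };
	int mismatches = 0;

	memset(data[2], 0xff, PAGE);	/* copy 2 disagrees with copy 0 */
	sync_window(data, readable, true, &mismatches);
	printf("mismatches: %d\n", mismatches);
	return 0;
}

The point of the patch is the first branch: before it, a 'check' pass skipped unreadable blocks too; after it, only the mismatch case respects check-only mode.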
Signed-off-by: NeilBrown --- drivers/md/raid10.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 909450414c67..10415ddfcb42 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1541,11 +1541,12 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) if (j == vcnt) continue; mddev->resync_mismatches += r10_bio->sectors; + if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + /* Don't fix anything. */ + continue; } - if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) - /* Don't fix anything. */ - continue; - /* Ok, we need to write this bio + /* Ok, we need to write this bio, either to correct an + * inconsistency or to correct an unreadable block. * First we need to fixup bv_offset, bv_len and * bi_vecs, as the read request might have corrupted these */ -- cgit v1.2.3 From 1a0b7cd82657a590f163b090bd9123a3a6b9aae4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:25 +1000 Subject: md/raid10: record bad blocks due to write errors during resync/recovery. If we get a write error during resync/recovery don't fail the device but instead record a bad block. If that fails we can then fail the device. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 10415ddfcb42..e54ff3274eda 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1452,9 +1452,10 @@ static void end_sync_write(struct bio *bio, int error) d = find_bio_disk(conf, r10_bio, bio, &slot); - if (!uptodate) - md_error(mddev, conf->mirrors[d].rdev); - else if (is_badblock(conf->mirrors[d].rdev, + if (!uptodate) { + set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags); + set_bit(R10BIO_WriteError, &r10_bio->state); + } else if (is_badblock(conf->mirrors[d].rdev, r10_bio->devs[slot].addr, r10_bio->sectors, &first_bad, &bad_sectors)) @@ -1465,7 +1466,8 @@ static void end_sync_write(struct bio *bio, int error) if (r10_bio->master_bio == NULL) { /* the primary of several recovery bios */ sector_t s = r10_bio->sectors; - if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + if (test_bit(R10BIO_MadeGood, &r10_bio->state) || + test_bit(R10BIO_WriteError, &r10_bio->state)) reschedule_retry(r10_bio); else put_buf(r10_bio); @@ -1473,7 +1475,8 @@ static void end_sync_write(struct bio *bio, int error) break; } else { r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio; - if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + if (test_bit(R10BIO_MadeGood, &r10_bio->state) || + test_bit(R10BIO_WriteError, &r10_bio->state)) reschedule_retry(r10_bio); else put_buf(r10_bio); @@ -2029,23 +2032,33 @@ static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio) /* Some sort of write request has finished and it * succeeded in writing where we thought there was a * bad block. So forget the bad block. + * Or possibly if failed and we need to record + * a bad block. 
*/ int m; mdk_rdev_t *rdev; if (test_bit(R10BIO_IsSync, &r10_bio->state) || test_bit(R10BIO_IsRecover, &r10_bio->state)) { - for (m = 0; m < conf->copies; m++) - if (r10_bio->devs[m].bio && - test_bit(BIO_UPTODATE, + for (m = 0; m < conf->copies; m++) { + int dev = r10_bio->devs[m].devnum; + rdev = conf->mirrors[dev].rdev; + if (r10_bio->devs[m].bio == NULL) + continue; + if (test_bit(BIO_UPTODATE, &r10_bio->devs[m].bio->bi_flags)) { - int dev = r10_bio->devs[m].devnum; - rdev = conf->mirrors[dev].rdev; rdev_clear_badblocks( rdev, r10_bio->devs[m].addr, r10_bio->sectors); + } else { + if (!rdev_set_badblocks( + rdev, + r10_bio->devs[m].addr, + r10_bio->sectors, 0)) + md_error(conf->mddev, rdev); } + } put_buf(r10_bio); } else { for (m = 0; m < conf->copies; m++) { -- cgit v1.2.3 From e684e41db3bad44f1262341300b827c0d94ae220 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:25 +1000 Subject: md/raid10: simplify read error handling during recovery. If a read error is detected during recovery the code currently fails the read device. This isn't really necessary. recovery_request_write will signal a write error to end_sync_write and it will record a write error on the destination device which will record a bad block there or kick it from the array. So just remove this call to do md_error. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e54ff3274eda..6022e2aaf3aa 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1418,13 +1418,12 @@ static void end_sync_read(struct bio *bio, int error) if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R10BIO_Uptodate, &r10_bio->state); - else { + else + /* The write handler will notice the lack of + * R10BIO_Uptodate and record any errors etc + */ atomic_add(r10_bio->sectors, &conf->mirrors[d].rdev->corrected_errors); - if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) - md_error(r10_bio->mddev, - conf->mirrors[d].rdev); - } /* for reconstruct, we always reschedule after a read. * for resync, only after all reads -- cgit v1.2.3 From 5e5702898e93eee7d69b6efde109609a89a61001 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:25 +1000 Subject: md/raid10: Handle read errors during recovery better. Currently when we get a read error during recovery, we simply abort the recovery. Instead, repeat the read in page-sized blocks. On successful reads, write to the target. On read errors, record a bad block on the destination, and only if that fails do we abort the recovery. As we now retry reads we need to know where we read from. This was in bi_sector but that can be changed during a read attempt. So store the correct from_addr and to_addr in the r10_bio for later access. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 154 +++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 121 insertions(+), 33 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 6022e2aaf3aa..fc9ebbab3f62 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1141,7 +1141,7 @@ retry_write: wake_up(&conf->wait_barrier); if (sectors_handled < (bio->bi_size >> 9)) { - /* We need another r1_bio. It has already been counted + /* We need another r10_bio. It has already been counted * in bio->bi_phys_segments. 
*/ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); @@ -1438,29 +1438,10 @@ static void end_sync_read(struct bio *bio, int error) } } -static void end_sync_write(struct bio *bio, int error) +static void end_sync_request(r10bio_t *r10_bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); - r10bio_t *r10_bio = bio->bi_private; mddev_t *mddev = r10_bio->mddev; - conf_t *conf = mddev->private; - int d; - sector_t first_bad; - int bad_sectors; - int slot; - - d = find_bio_disk(conf, r10_bio, bio, &slot); - - if (!uptodate) { - set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags); - set_bit(R10BIO_WriteError, &r10_bio->state); - } else if (is_badblock(conf->mirrors[d].rdev, - r10_bio->devs[slot].addr, - r10_bio->sectors, - &first_bad, &bad_sectors)) - set_bit(R10BIO_MadeGood, &r10_bio->state); - rdev_dec_pending(conf->mirrors[d].rdev, mddev); while (atomic_dec_and_test(&r10_bio->remaining)) { if (r10_bio->master_bio == NULL) { /* the primary of several recovery bios */ @@ -1484,6 +1465,33 @@ static void end_sync_write(struct bio *bio, int error) } } +static void end_sync_write(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + r10bio_t *r10_bio = bio->bi_private; + mddev_t *mddev = r10_bio->mddev; + conf_t *conf = mddev->private; + int d; + sector_t first_bad; + int bad_sectors; + int slot; + + d = find_bio_disk(conf, r10_bio, bio, &slot); + + if (!uptodate) { + set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags); + set_bit(R10BIO_WriteError, &r10_bio->state); + } else if (is_badblock(conf->mirrors[d].rdev, + r10_bio->devs[slot].addr, + r10_bio->sectors, + &first_bad, &bad_sectors)) + set_bit(R10BIO_MadeGood, &r10_bio->state); + + rdev_dec_pending(conf->mirrors[d].rdev, mddev); + + end_sync_request(r10_bio); +} + /* * Note: sync and recover and handled very differently for raid10 * This code is for resync. @@ -1600,6 +1608,84 @@ done: * The second for writing. * */ +static void fix_recovery_read_error(r10bio_t *r10_bio) +{ + /* We got a read error during recovery. + * We repeat the read in smaller page-sized sections. + * If a read succeeds, write it to the new device or record + * a bad block if we cannot. + * If a read fails, record a bad block on both old and + * new devices. 
+ */ + mddev_t *mddev = r10_bio->mddev; + conf_t *conf = mddev->private; + struct bio *bio = r10_bio->devs[0].bio; + sector_t sect = 0; + int sectors = r10_bio->sectors; + int idx = 0; + int dr = r10_bio->devs[0].devnum; + int dw = r10_bio->devs[1].devnum; + + while (sectors) { + int s = sectors; + mdk_rdev_t *rdev; + sector_t addr; + int ok; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + + rdev = conf->mirrors[dr].rdev; + addr = r10_bio->devs[0].addr + sect, + ok = sync_page_io(rdev, + addr, + s << 9, + bio->bi_io_vec[idx].bv_page, + READ, false); + if (ok) { + rdev = conf->mirrors[dw].rdev; + addr = r10_bio->devs[1].addr + sect; + ok = sync_page_io(rdev, + addr, + s << 9, + bio->bi_io_vec[idx].bv_page, + WRITE, false); + if (!ok) + set_bit(WriteErrorSeen, &rdev->flags); + } + if (!ok) { + /* We don't worry if we cannot set a bad block - + * it really is bad so there is no loss in not + * recording it yet + */ + rdev_set_badblocks(rdev, addr, s, 0); + + if (rdev != conf->mirrors[dw].rdev) { + /* need bad block on destination too */ + mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev; + addr = r10_bio->devs[1].addr + sect; + ok = rdev_set_badblocks(rdev2, addr, s, 0); + if (!ok) { + /* just abort the recovery */ + printk(KERN_NOTICE + "md/raid10:%s: recovery aborted" + " due to read error\n", + mdname(mddev)); + + conf->mirrors[dw].recovery_disabled + = mddev->recovery_disabled; + set_bit(MD_RECOVERY_INTR, + &mddev->recovery); + break; + } + } + } + + sectors -= s; + sect += s; + idx++; + } +} static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) { @@ -1607,6 +1693,12 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) int d; struct bio *wbio; + if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { + fix_recovery_read_error(r10_bio); + end_sync_request(r10_bio); + return; + } + /* * share the pages with the first bio * and submit the write request @@ -1616,16 +1708,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) atomic_inc(&conf->mirrors[d].rdev->nr_pending); md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); - if (test_bit(R10BIO_Uptodate, &r10_bio->state)) - generic_make_request(wbio); - else { - printk(KERN_NOTICE - "md/raid10:%s: recovery aborted due to read error\n", - mdname(mddev)); - conf->mirrors[d].recovery_disabled = mddev->recovery_disabled; - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - bio_endio(wbio, 0); - } + generic_make_request(wbio); } @@ -2339,6 +2422,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, for (j=0; jcopies;j++) { int k; int d = r10_bio->devs[j].devnum; + sector_t from_addr, to_addr; mdk_rdev_t *rdev; sector_t sector, first_bad; int bad_sectors; @@ -2368,7 +2452,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; bio->bi_rw = READ; - bio->bi_sector = r10_bio->devs[j].addr + + from_addr = r10_bio->devs[j].addr; + bio->bi_sector = from_addr + conf->mirrors[d].rdev->data_offset; bio->bi_bdev = conf->mirrors[d].rdev->bdev; atomic_inc(&conf->mirrors[d].rdev->nr_pending); @@ -2385,12 +2470,15 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio->bi_rw = WRITE; - bio->bi_sector = r10_bio->devs[k].addr + + to_addr = r10_bio->devs[k].addr; + bio->bi_sector = to_addr + conf->mirrors[i].rdev->data_offset; bio->bi_bdev = conf->mirrors[i].rdev->bdev; r10_bio->devs[0].devnum = d; + r10_bio->devs[0].addr = from_addr; 
r10_bio->devs[1].devnum = i; + r10_bio->devs[1].addr = to_addr; break; } -- cgit v1.2.3 From 58c54fcca3bac5bf9290cfed31c76e4c4bfbabaf Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 Jul 2011 11:39:25 +1000 Subject: md/raid10: handle further errors during fix_read_error better. If we find more read/write errors we should record a bad block before failing the device. Signed-off-by: NeilBrown --- drivers/md/raid10.c | 59 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 15 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index fc9ebbab3f62..8b29cd4f01c8 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1749,6 +1749,26 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev) atomic_set(&rdev->read_errors, read_errors >> hours_since_last); } +static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector, + int sectors, struct page *page, int rw) +{ + sector_t first_bad; + int bad_sectors; + + if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) + && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) + return -1; + if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) + /* success */ + return 1; + if (rw == WRITE) + set_bit(WriteErrorSeen, &rdev->flags); + /* need to record an error - either for the block or the device */ + if (!rdev_set_badblocks(rdev, sector, sectors, 0)) + md_error(rdev->mddev, rdev); + return 0; +} + /* * This is a kernel thread which: * @@ -1832,9 +1852,19 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) rcu_read_unlock(); if (!success) { - /* Cannot read from anywhere -- bye bye array */ + /* Cannot read from anywhere, just mark the block + * as bad on the first device to discourage future + * reads. + */ int dn = r10_bio->devs[r10_bio->read_slot].devnum; - md_error(mddev, conf->mirrors[dn].rdev); + rdev = conf->mirrors[dn].rdev; + + if (!rdev_set_badblocks( + rdev, + r10_bio->devs[r10_bio->read_slot].addr + + sect, + s, 0)) + md_error(mddev, rdev); break; } @@ -1855,10 +1885,10 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - if (sync_page_io(rdev, - r10_bio->devs[sl].addr + - sect, - s<<9, conf->tmppage, WRITE, false) + if (r10_sync_page_io(rdev, + r10_bio->devs[sl].addr + + sect, + s<<9, conf->tmppage, WRITE) == 0) { /* Well, this device is dead */ printk(KERN_NOTICE @@ -1873,7 +1903,6 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) "drive\n", mdname(mddev), bdevname(rdev->bdev, b)); - md_error(mddev, rdev); } rdev_dec_pending(rdev, mddev); rcu_read_lock(); @@ -1893,11 +1922,12 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - if (sync_page_io(rdev, - r10_bio->devs[sl].addr + - sect, - s<<9, conf->tmppage, - READ, false) == 0) { + switch (r10_sync_page_io(rdev, + r10_bio->devs[sl].addr + + sect, + s<<9, conf->tmppage, + READ)) { + case 0: /* Well, this device is dead */ printk(KERN_NOTICE "md/raid10:%s: unable to read back " @@ -1911,9 +1941,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) "drive\n", mdname(mddev), bdevname(rdev->bdev, b)); - - md_error(mddev, rdev); - } else { + break; + case 1: printk(KERN_INFO "md/raid10:%s: read error corrected" " (%d sectors at %llu on %s)\n", -- cgit v1.2.3
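To close out the series: the r10_sync_page_io() added above gives every repair I/O a single escalation ladder, namely refuse I/O that a bad-block record forbids, try it, and on failure record a bad block before resorting to failing the device. A hedged userspace sketch of that ladder follows; the device model and I/O stub are invented, and only the ordering of the checks mirrors the kernel function:

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long long sector_t;

struct dev {
	bool write_error_seen;
	bool badblocks_full;	/* when true, recording a bad block fails */
	bool failed;
};

static bool dev_io(struct dev *d, sector_t sector, int sectors, bool write)
{
	(void)d; (void)sector; (void)sectors;
	return !write;		/* simulate: reads succeed, writes fail */
}

/* Returns 1 on success, 0 on a recorded failure, -1 if the request
 * must not be issued at all. */
static int sync_page_io_guarded(struct dev *d, sector_t sector, int sectors,
				bool known_bad, bool write)
{
	if (known_bad && (!write || d->write_error_seen))
		return -1;	/* never read a known-bad block; don't rewrite
				 * one on a device that already failed writes */
	if (dev_io(d, sector, sectors, write))
		return 1;
	if (write)
		d->write_error_seen = true;
	/* Record the bad block; only if even that fails, fail the device. */
	if (d->badblocks_full) {
		d->failed = true;
		printf("device failed at %llu\n", sector);
	} else
		printf("recorded bad block at %llu+%d\n", sector, sectors);
	return 0;
}

int main(void)
{
	struct dev d = { false, false, false };

	printf("read: %d\n", sync_page_io_guarded(&d, 100, 8, false, false));
	printf("write: %d\n", sync_page_io_guarded(&d, 100, 8, false, true));
	return 0;
}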